本文整理了Java中backtype.storm.tuple.Tuple.getLongByField()
方法的一些代码示例,展示了Tuple.getLongByField()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Tuple.getLongByField()
方法的具体详情如下:
包路径:backtype.storm.tuple.Tuple
类名称:Tuple
方法名:getLongByField
暂无
代码示例来源:origin: alibaba/jstorm
/** Reads the message id carried in the configured {@code msgIdFieldName} field. */
private long getMsgId(Tuple input) {
    final Long msgId = input.getLongByField(msgIdFieldName);
    return msgId;
}
代码示例来源:origin: alibaba/jstorm
/**
 * Routes an incoming tuple into the window manager.
 * In event-time mode the timestamp is read from the configured tuple field and
 * checked against the watermark; late tuples are logged and dropped. In
 * processing-time mode the window manager assigns the time itself.
 */
@Override
public void execute(Tuple input) {
    if (isTupleTs()) {
        long ts = input.getLongByField(tupleTsFieldName);
        if (waterMarkEventGenerator.track(input.getSourceGlobalStreamid(), ts)) {
            windowManager.add(input, ts);
        } else {
            // Tuple arrived behind the watermark; drop it. ("will not be processed"
            // fixes the grammar and matches the upstream Apache Storm message.)
            LOG.info("Received a late tuple {} with ts {}. This will not be processed.", input, ts);
        }
    } else {
        windowManager.add(input);
    }
}
代码示例来源:origin: alibaba/jstorm
long txid = input.getLongByField(CheckpointSpout.CHECKPOINT_FIELD_TXID);
if (shouldProcessTransaction(action, txid)) {
LOG.debug("Processing action {}, txid {}", action, txid);
代码示例来源:origin: hmsonline/storm-cassandra
/** Maps the tuple to its counter increment, taken from the configured field. */
@Override
public long mapToIncrementAmount(Tuple tuple) {
    final Long amount = tuple.getLongByField(incrementAmountField);
    return amount;
}
代码示例来源:origin: com.alibaba.jstorm/jstorm-core
/** @return the message id stored in the tuple under {@code msgIdFieldName} */
private long getMsgId(Tuple input) {
    long id = input.getLongByField(msgIdFieldName);
    return id;
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Orders tuples by their TIMESTAMP field, ascending.
 * Uses {@link Long#compare(long, long)} instead of comparing boxed Longs,
 * which is the idiomatic primitive comparison and avoids the intermediate
 * boxed receiver.
 */
@Override
public int compareTo(ComparableTuple o) {
    return Long.compare(
            this.tuple.getLongByField(Field.TIMESTAMP),
            o.tuple.getLongByField(Field.TIMESTAMP));
}
}
代码示例来源:origin: ptgoetz/storm-hdfs
/**
 * Maps the tuple to its writable key, lazily allocating a single
 * {@code LongWritable} that is mutated and reused across calls.
 */
@Override
public Writable key(Tuple tuple) {
    if (this.key == null) {
        // Allocate once; subsequent calls only update the value.
        this.key = new LongWritable();
    }
    long keyValue = tuple.getLongByField(this.keyField);
    this.key.set(keyValue);
    return this.key;
}
代码示例来源:origin: apache/eagle
/**
 * Runs one aggregation cycle over [startTime, startTime + duration) and
 * acks or fails the trigger tuple depending on the outcome.
 */
@Override
public void execute(Tuple tuple) {
    Long startTime = tuple.getLongByField("startTime");
    LOG.info("get startTime {}", startTime);
    // 1000L forces long arithmetic so the seconds-to-millis conversion cannot
    // overflow int before widening (defensive; duration type declared elsewhere).
    Long endTime = startTime + stormConfig.aggregationDuration * 1000L;
    if (metricsAggregateContainer.aggregate(startTime, endTime)) {
        collector.ack(tuple);
        LOG.info("succeed startTime {}", startTime);
    } else {
        collector.fail(tuple);
        LOG.warn("failed startTime {}", startTime);
    }
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Handles either a global-median update or a per-plug median tuple.
 * Global medians are recorded by timestamp, then any queued per-plug tuples
 * whose timestamp now has a matching global median are drained and processed.
 */
@Override
public void execute(Tuple tuple) {
    String source = tuple.getSourceComponent();
    if (!source.equals(Component.GLOBAL_MEDIAN)) {
        processPerPlugMedianTuple(tuple);
        return;
    }
    long timestamp = tuple.getLongByField(Field.TIMESTAMP);
    double globalMedianLoad = tuple.getDoubleByField(Field.GLOBAL_MEDIAN_LOAD);
    globalMedianBacklog.put(timestamp, globalMedianLoad);
    // The queue is ordered by timestamp: drain every pending per-plug tuple
    // belonging to the timestamp we just received a global median for.
    while (!unprocessedMessages.isEmpty()
            && unprocessedMessages.peek().tuple.getLongByField(Field.TIMESTAMP).equals(timestamp)) {
        Tuple pending = unprocessedMessages.poll().tuple;
        processPerPlugMedianTuple(pending);
    }
}
代码示例来源:origin: com.srotya.tau/tau-dengine
/**
 * Serializes the error details carried on the tuple into a JSON object and
 * forwards it on the Kafka error stream, keyed by the originating bolt.
 * Property insertion order is preserved so the emitted JSON layout is unchanged.
 */
@Override
public void execute(Tuple input) {
    final String sourceBolt = input.getStringByField(Constants.ERROR_SOURCE_BOLT);
    final JsonObject error = new JsonObject();
    error.addProperty(Constants.ERROR_TIMESTAMP, input.getLongByField(Constants.ERROR_TIMESTAMP));
    error.addProperty(Constants.ERROR_MESSAGE, input.getStringByField(Constants.ERROR_MESSAGE));
    error.addProperty(Constants.ERROR_EXCEPTION, input.getStringByField(Constants.ERROR_EXCEPTION));
    error.addProperty(Constants.ERROR_SOURCE, input.getStringByField(Constants.ERROR_SOURCE));
    error.addProperty(Constants.ERROR_SOURCE_BOLT, sourceBolt);
    error.addProperty(Constants.ERROR_EXCEPTION_MESSAGE,
            input.getStringByField(Constants.ERROR_EXCEPTION_MESSAGE));
    // Serialize once; JsonObject#toString is pure, so this is observationally identical.
    final String json = error.toString();
    if (debug) {
        logger.info(json);
    }
    collector.emit(Constants.KAFKA_ERROR_STREAM, input, new Values(sourceBolt, json));
    collector.ack(input);
}
代码示例来源:origin: Symantec/hendrix
// Wraps the error details from the tuple into a JSON object and forwards it on
// the Kafka error stream, keyed by the bolt that produced the error.
@Override
public void execute(Tuple input) {
    // Originating bolt; used both as a JSON field and as the emit key.
    String errorSourceBolt = input.getStringByField(Constants.ERROR_SOURCE_BOLT);
    JsonObject object = new JsonObject();
    object.addProperty(Constants.ERROR_TIMESTAMP, input.getLongByField(Constants.ERROR_TIMESTAMP));
    object.addProperty(Constants.ERROR_MESSAGE, input.getStringByField(Constants.ERROR_MESSAGE));
    object.addProperty(Constants.ERROR_EXCEPTION, input.getStringByField(Constants.ERROR_EXCEPTION));
    object.addProperty(Constants.ERROR_SOURCE, input.getStringByField(Constants.ERROR_SOURCE));
    object.addProperty(Constants.ERROR_SOURCE_BOLT, errorSourceBolt);
    object.addProperty(Constants.ERROR_EXCEPTION_MESSAGE,
            input.getStringByField(Constants.ERROR_EXCEPTION_MESSAGE));
    if (debug) {
        // Debug mode mirrors the serialized error to the local log.
        logger.info(object.toString());
    }
    collector.emit(Constants.KAFKA_ERROR_STREAM, input, new Values(errorSourceBolt, object.toString()));
    collector.ack(input);
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Maintains a per-id sliding window of anomaly scores and emits the summed
 * window score alongside the latest score and the forwarded observation.
 */
@Override
public void execute(Tuple input) {
    long timestamp = input.getLongByField(Field.TIMESTAMP);
    String id = input.getStringByField(Field.ID);
    double dataInstanceAnomalyScore = input.getDoubleByField(Field.DATAINST_ANOMALY_SCORE);
    Queue<Double> slidingWindow = slidingWindowMap.get(id);
    if (slidingWindow == null) {
        slidingWindow = new LinkedList<>();
        // Register the queue only on first sight; the map already holds the live
        // queue afterwards, so the original per-call re-put was redundant.
        slidingWindowMap.put(id, slidingWindow);
    }
    // Update the sliding window: append the newest score, evict the oldest when full.
    slidingWindow.add(dataInstanceAnomalyScore);
    if (slidingWindow.size() > this.windowLength) {
        slidingWindow.poll();
    }
    double sumScore = 0.0;
    for (double score : slidingWindow) {
        sumScore += score;
    }
    // getValue(3) forwards the raw observation payload unchanged — TODO confirm index with emitter.
    collector.emit(new Values(id, sumScore, timestamp, input.getValue(3), dataInstanceAnomalyScore));
    collector.ack(input);
}
代码示例来源:origin: com.alibaba.jstorm/jstorm-core
// Dispatches an incoming tuple to the window manager, using event time when a
// tuple-timestamp field is configured and processing time otherwise.
@Override
public void execute(Tuple input) {
    if (isTupleTs()) {
        // Event-time mode: read the timestamp from the configured tuple field.
        long ts = input.getLongByField(tupleTsFieldName);
        if (waterMarkEventGenerator.track(input.getSourceGlobalStreamid(), ts)) {
            windowManager.add(input, ts);
        } else {
            // Tuple is behind the current watermark; it is logged and dropped.
            LOG.info("Received a late tuple {} with ts {}. This will not processed.", input, ts);
        }
    } else {
        // Processing-time mode: the window manager assigns the time itself.
        windowManager.add(input);
    }
}
代码示例来源:origin: Symantec/hendrix
// Serializes an aggregation event (either a state-track flag or an aggregation
// value) to JSON and emits it keyed by "<ruleActionId>_<aggregationKey>".
@Override
public void execute(Tuple tuple) {
    JsonObject obj = new JsonObject();
    obj.addProperty(Constants.FIELD_TIMESTAMP, tuple.getLongByField(Constants.FIELD_TIMESTAMP));
    obj.addProperty(Constants.FIELD_AGGREGATION_WINDOW,
            tuple.getIntegerByField(Constants.FIELD_AGGREGATION_WINDOW));
    obj.addProperty(Constants.FIELD_RULE_ACTION_ID, tuple.getStringByField(Constants.FIELD_RULE_ACTION_ID));
    obj.addProperty(Constants.FIELD_AGGREGATION_KEY, tuple.getStringByField(Constants.FIELD_AGGREGATION_KEY));
    if (tuple.contains(Constants.FIELD_STATE_TRACK)) {
        // State-track events carry a boolean flag.
        obj.addProperty(Constants.FIELD_STATE_TRACK, tuple.getBooleanByField(Constants.FIELD_STATE_TRACK));
    } else if (tuple.contains(Constants.FIELD_AGGREGATION_VALUE)) {
        // Value events carry an opaque aggregation value, serialized via toString().
        obj.addProperty(Constants.FIELD_AGGREGATION_VALUE,
                tuple.getValueByField(Constants.FIELD_AGGREGATION_VALUE).toString());
    } else {
        // invalid event: neither a state-track flag nor an aggregation value present.
        collector.fail(tuple);
        return;
    }
    collector.emit(tuple, new Values(tuple.getStringByField(Constants.FIELD_RULE_ACTION_ID) + "_"
            + tuple.getStringByField(Constants.FIELD_AGGREGATION_KEY), gson.toJson(obj)));
    collector.ack(tuple);
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Buffers observations belonging to one timestamp batch; when a tuple with a
 * newer timestamp arrives, the completed batch is scored and one tuple per
 * score package is emitted before the new batch begins.
 */
@Override
public void execute(Tuple input) {
    final long ts = input.getLongByField(Field.TIMESTAMP);
    final boolean newBatch = ts > previousTimestamp;
    if (newBatch && !observationList.isEmpty()) {
        // Score and flush the batch accumulated under previousTimestamp.
        for (ScorePackage pkg : dataInstanceScorer.getScores(observationList)) {
            collector.emit(new Values(pkg.getId(), pkg.getScore(),
                    previousTimestamp, pkg.getObj()));
        }
        observationList.clear();
    }
    if (newBatch) {
        previousTimestamp = ts;
    }
    observationList.add(input.getValueByField(Field.OBSERVATION));
    collector.ack(input);
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Counts tuples per minute. A bounded ring buffer tracks which minutes are
 * resident in the counts map; when the buffer is full, the oldest minute is
 * evicted before a new one is inserted. Emits (minute, runningCount) anchored
 * to the input tuple.
 */
@Override
public void execute(Tuple input) {
    final long minute = input.getLongByField(Field.TIMESTAMP_MINUTES);
    MutableLong count = counts.get(minute);
    if (count != null) {
        count.increment();
    } else {
        // First occurrence of this minute: make room if at capacity.
        if (buffer.isFull()) {
            long evictedMinute = (Long) buffer.remove();
            counts.remove(evictedMinute);
        }
        count = new MutableLong(1);
        counts.put(minute, count);
        buffer.add(minute);
    }
    collector.emit(input, new Values(minute, count.longValue()));
    collector.ack(input);
}
代码示例来源:origin: Symantec/hendrix
/** Verifies that a well-formed state-track tuple is serialized and acked exactly once. */
@Test
public void testBasicSerialization() {
    AggregationSerializerBolt bolt = new AggregationSerializerBolt();
    Map<String, String> conf = new HashMap<>();
    bolt.prepare(conf, null, collector);
    // Stub a tuple that carries a state-track event with all required fields.
    when(tuple.getLongByField(Constants.FIELD_TIMESTAMP)).thenReturn(143322L);
    when(tuple.getStringByField(Constants.FIELD_RULE_ACTION_ID)).thenReturn("34_22");
    when(tuple.getStringByField(Constants.FIELD_AGGREGATION_KEY)).thenReturn("host1");
    when(tuple.getIntegerByField(Constants.FIELD_AGGREGATION_WINDOW)).thenReturn(100);
    when(tuple.getBooleanByField(Constants.FIELD_STATE_TRACK)).thenReturn(true);
    when(tuple.contains(Constants.FIELD_STATE_TRACK)).thenReturn(true);

    bolt.execute(tuple);

    verify(collector, times(1)).ack(tuple);
}
代码示例来源:origin: apache/eagle
/**
 * Converts a Storm tuple into a single StreamEvent. Event time is taken from
 * the tuple's timestamp field when present, falling back to the current
 * wall-clock time; all tuple values are copied positionally into the payload.
 */
@Override
public List<StreamEvent> map(Tuple tuple) throws Exception {
    long timestamp;
    if (tuple.getFields().contains(TIMESTAMP_FIELD)) {
        try {
            // Use the shared constant instead of the hard-coded "timestamp"
            // literal so the lookup stays consistent with the contains() check.
            timestamp = tuple.getLongByField(TIMESTAMP_FIELD);
        } catch (Exception ex) {
            // Field is declared but its value is missing/unreadable for this
            // tuple; log and fall back to 0 rather than dropping the event.
            LOGGER.error(ex.getMessage(), ex);
            timestamp = 0;
        }
    } else {
        timestamp = System.currentTimeMillis();
    }
    // Copy every tuple value positionally into the event payload.
    Object[] values = new Object[tuple.getFields().size()];
    for (int i = 0; i < tuple.getFields().size(); i++) {
        values[i] = tuple.getValue(i);
    }
    StreamEvent event = new StreamEvent();
    event.setTimestamp(timestamp);
    event.setStreamId(streamId);
    event.setData(values);
    return Collections.singletonList(event);
}
}
代码示例来源:origin: com.srotya.tau/tau-dengine
// Feeds one tuple into the aggregation engine and buffers it for batched
// acking; the buffer is flushed (and its tuples acked) once it reaches
// bufferSize.
protected void aggregate(Tuple tuple) {
    try {
        // Count the hit against the rule portion of the rule-action id.
        aggregationHit.scope(Utils.separateRuleActionId(tuple.getStringByField(Constants.FIELD_RULE_ACTION_ID))
                .getKey().toString()).incr();
        engine.aggregate(tuple.getLongByField(Constants.FIELD_TIMESTAMP),
                tuple.getIntegerByField(Constants.FIELD_AGGREGATION_WINDOW),
                tuple.getStringByField(Constants.FIELD_RULE_ACTION_ID),
                tuple.getStringByField(Constants.FIELD_AGGREGATION_KEY),
                tuple.getStringByField(Constants.FIELD_AGGREGATION_VALUE));
        buffer.add(tuple);
        if (buffer.size() >= bufferSize) {
            flushAckAndClearBuffer();
        }
    } catch (AggregationRejectException e) {
        // Rejection is non-fatal: report on the error stream and ack the tuple.
        StormContextUtil.emitErrorTuple(collector, tuple, MarkovianAggregationBolt.class, "",
                "Aggregation rejected", e);
        collector.ack(tuple);
    } catch (IOException e) {
        // Flush failure: fail all buffered tuples so Storm replays them.
        failAndClearBuffer();
        StormContextUtil.emitErrorTuple(collector, tuple, MarkovianAggregationBolt.class, "",
                "Aggregation flush failed", e);
    }
}
代码示例来源:origin: mayconbordin/storm-applications
/**
 * Applies a sliding-window add/remove action to the running median.
 * On ADD, recomputes the median and emits it at most once per distinct
 * timestamp; on any other action, removes the value from the calculator.
 */
@Override
public void execute(Tuple tuple) {
    final int action = tuple.getIntegerByField(Field.SLIDING_WINDOW_ACTION);
    final double value = tuple.getDoubleByField(Field.VALUE);
    final long ts = tuple.getLongByField(Field.TIMESTAMP);
    if (action != SlidingWindowAction.ADD) {
        medianCalc.remove(value);
        return;
    }
    final double median = medianCalc.getMedian(value);
    if (lastUpdatedTs < ts) {
        // The sliding window has advanced; publish the new median once.
        lastUpdatedTs = ts;
        collector.emit(new Values(ts, median));
    }
}
内容来源于网络,如有侵权,请联系作者删除!