Usage and code examples for com.hurence.logisland.component.PropertyValue


This article collects code examples of the Java class com.hurence.logisland.component.PropertyValue and shows how the class is used in practice. The examples were gathered from platforms such as GitHub, Stack Overflow and Maven and extracted from selected projects, so they should serve as useful references. Details of the PropertyValue class:
Package: com.hurence.logisland.component
Class name: PropertyValue

About PropertyValue

A PropertyValue provides a mechanism whereby the currently configured value of a processor property can be obtained in different forms.
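In the examples that follow, a PropertyValue is typically obtained from a ProcessContext or a ControllerServiceInitializationContext via getPropertyValue(descriptor) and then converted with asString(), asBoolean(), asInteger(), asLong(), asDouble(), asTimePeriod(...) or asControllerService(...); isSet() reports whether the property was configured at all. A minimal sketch of this pattern (BATCH_SIZE, ENABLE_DEBUG and OPTIONAL_SUFFIX are hypothetical descriptors, not taken from any of the projects quoted below):

@Override
public void init(final ProcessContext context) {
  // Sketch only: the three descriptors are placeholders for real PropertyDescriptor constants.
  int batchSize = context.getPropertyValue(BATCH_SIZE).asInteger();
  boolean debug = context.getPropertyValue(ENABLE_DEBUG).asBoolean();
  String suffix = context.getPropertyValue(OPTIONAL_SUFFIX).isSet()
      ? context.getPropertyValue(OPTIONAL_SUFFIX).asString()
      : "_default";
  // ... use the converted values to configure the processor
}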

Code examples

Code example source: com.hurence.logisland/logisland-solr_6_4

private Map<String, String> createMetricsTypeMapping(ControllerServiceInitializationContext context) {
  return Arrays.stream(context.getPropertyValue(METRICS_TYPE_MAPPING).asString()
      .split(","))
      .filter(StringUtils::isNotBlank)
      .map(s -> s.split(":"))
      .collect(Collectors.toMap(a -> a[0], a -> a[1]));
}
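For illustration, if the METRICS_TYPE_MAPPING property were configured with a value such as "temperature:metric,visits:count" (a made-up value), the pipeline above would yield the map {temperature=metric, visits=count}. The same parsing as a standalone sketch, with the StringUtils::isNotBlank filter replaced by a plain blank check so it runs without extra dependencies:

import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;

public class MetricsMappingSketch {
  public static void main(String[] args) {
    String raw = "temperature:metric,visits:count"; // hypothetical property value
    Map<String, String> mapping = Arrays.stream(raw.split(","))
        .filter(s -> !s.trim().isEmpty())           // stands in for StringUtils::isNotBlank
        .map(s -> s.split(":"))
        .collect(Collectors.toMap(a -> a[0], a -> a[1]));
    System.out.println(mapping);                    // e.g. {temperature=metric, visits=count}
  }
}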

Code example source: com.hurence.logisland/logisland-solr-client-service-api

SolrClient client;
final Boolean isCloud = context.getPropertyValue(SOLR_CLOUD).asBoolean();
final String connectionString = context.getPropertyValue(SOLR_CONNECTION_STRING).asString();
final String collection = context.getPropertyValue(SOLR_COLLECTION).asString();
setSchemaUpdateTimeout(context.getPropertyValue(SCHEMA_UPDATE_TIMEOUT).asInteger());
int batchSize = context.getPropertyValue(BATCH_SIZE).asInteger();
int numConcurrentRequests = context.getPropertyValue(CONCURRENT_REQUESTS).asInteger();
long flushInterval = context.getPropertyValue(FLUSH_INTERVAL).asLong();
updaters = new ArrayList<>(numConcurrentRequests);
for (int i = 0; i < numConcurrentRequests; i++) {
  // ... (rest of the excerpt elided)
}

Code example source: com.hurence.logisland/logisland-hbase-plugin

@Override
protected Collection<ValidationResult> customValidate(ValidationContext validationContext) {
  final boolean isAvroSerializer = validationContext.getPropertyValue(RECORD_SERIALIZER).asString().toLowerCase().contains("avro");
  final boolean isAvroSchemaSet = validationContext.getPropertyValue(RECORD_SCHEMA).isSet();
  final List<ValidationResult> problems = new ArrayList<>();
  if (isAvroSerializer && !isAvroSchemaSet) {
    problems.add(new ValidationResult.Builder()
        .subject(RECORD_SERIALIZER.getDisplayName())
        .valid(false)
        .explanation("an avro schema must be provided with an avro serializer")
        .build());
  }
  return problems;
}

Code example source: com.hurence.logisland/logisland-common-processors-plugin

@Override
public void init(final ProcessContext context) {
  this.fieldsNameMapping = getFieldsNameMapping(context);
  this.nbSplitLimit = context.getPropertyValue(NB_SPLIT_LIMIT).asInteger();
  this.isEnabledSplitCounter = context.getPropertyValue(ENABLE_SPLIT_COUNTER).asBoolean();
  this.splitCounterSuffix = context.getPropertyValue(SPLIT_COUNTER_SUFFIX).asString();
}

Code example source: com.hurence.logisland/logisland-web-analytics-plugin

@Override
public void init(final ProcessContext context) {
  debug = context.getPropertyValue(CONFIG_DEBUG).asBoolean();
  /**
   * Get source of traffic service (aka Elasticsearch service) and the cache
   */
  super.init(context);
  cacheService = context.getPropertyValue(CONFIG_CACHE_SERVICE).asControllerService(CacheService.class);
  if(cacheService == null) {
    logger.error("Cache service is not initialized!");
  }
}

Code example source: com.hurence.logisland/logisland-hbase-plugin

@Override
public void init(ProcessContext context) {
  this.clientService = context.getPropertyValue(HBASE_CLIENT_SERVICE).asControllerService(HBaseClientService.class);
  if (context.getPropertyValue(RECORD_SCHEMA).isSet()) {
    serializer = SerializerProvider.getSerializer(
        context.getPropertyValue(RECORD_SERIALIZER).asString(),
        context.getPropertyValue(RECORD_SCHEMA).asString());
  } else {
    serializer = SerializerProvider.getSerializer(context.getPropertyValue(RECORD_SERIALIZER).asString(), null);
  }
}

Code example source: com.hurence.logisland/logisland-outlier-detection-plugin

outlierConfig.getChunkingPolicy().setAmount(context.getPropertyValue(CHUNKING_POLICY_AMOUNT).asLong());
switch (context.getPropertyValue(CHUNKING_POLICY_TYPE).asString().toLowerCase()) {
  case "by_time":
    outlierConfig.getChunkingPolicy().setType(Type.BY_TIME);
    break;
  // ... (other cases elided in this excerpt)
}
switch (context.getPropertyValue(CHUNKING_POLICY_UNIT).asString().toLowerCase()) {
  case "milliseconds":
    outlierConfig.getChunkingPolicy().setUnit(Unit.MILLISECONDS);
    break;
  // ... (other cases elided in this excerpt)
}
outlierConfig.getRotationPolicy().setAmount(context.getPropertyValue(ROTATION_POLICY_AMOUNT).asLong());
switch (context.getPropertyValue(ROTATION_POLICY_TYPE).asString().toLowerCase()) {
  case "by_time":
    outlierConfig.getRotationPolicy().setType(Type.BY_TIME);
    break;
  // ... (other cases elided in this excerpt)
}
switch (context.getPropertyValue(ROTATION_POLICY_UNIT).asString().toLowerCase()) {
  case "milliseconds":
    outlierConfig.getRotationPolicy().setUnit(Unit.MILLISECONDS);
    break;
  // ... (other cases elided in this excerpt)
}
if (context.getPropertyValue(GLOBAL_STATISTICS_MIN).isSet()) {
  globalStatistics.setMin(context.getPropertyValue(GLOBAL_STATISTICS_MIN).asDouble());
}
if (context.getPropertyValue(GLOBAL_STATISTICS_MAX).isSet()) {
  globalStatistics.setMax(context.getPropertyValue(GLOBAL_STATISTICS_MAX).asDouble());
}
if (context.getPropertyValue(GLOBAL_STATISTICS_MEAN).isSet()) {
  globalStatistics.setMean(context.getPropertyValue(GLOBAL_STATISTICS_MEAN).asDouble());
}
if (context.getPropertyValue(GLOBAL_STATISTICS_STDDEV).isSet()) {
  globalStatistics.setStddev(context.getPropertyValue(GLOBAL_STATISTICS_STDDEV).asDouble());
}

Code example source: com.hurence.logisland/logisland-sampling-plugin

@Override
public Collection<Record> process(ProcessContext context, Collection<Record> records) {
  SamplingAlgorithm algorithm = SamplingAlgorithm.valueOf(
      context.getPropertyValue(SAMPLING_ALGORITHM).asString().toUpperCase());
  String valueFieldName = context.getPropertyValue(RECORD_VALUE_FIELD).asString();
  String timeFieldName = context.getPropertyValue(RECORD_TIME_FIELD).asString();
  int parameter = context.getPropertyValue(SAMPLING_PARAMETER).asInteger();
  Sampler sampler = SamplerFactory.getSampler(algorithm, valueFieldName, timeFieldName, parameter);
  return sampler.sample(new ArrayList<>(records)).stream()
      .map(r -> {
        return r.setField("is_sampled", FieldType.BOOLEAN, true);
      }).collect(Collectors.toList());
}

Code example source: com.hurence.logisland/logisland-web-analytics-plugin

private boolean has_domain_flag(String domain, String flag, ProcessContext context, Record record){
  final String source_of_traffic_suffix = context.getPropertyValue(SOURCE_OF_TRAFFIC_SUFFIX_FIELD).asString();
  final long cacheValidityPeriodSec = context.getPropertyValue(CONFIG_CACHE_VALIDITY_TIMEOUT).asLong();
  boolean has_flag = false;
  // ... (lines elided in this excerpt)
  String indexName = context.getPropertyValue(ES_INDEX_FIELD).asString();
  String typeName = context.getPropertyValue(ES_TYPE_FIELD).asString();
  MultiGetQueryRecordBuilder mgqrBuilder = new MultiGetQueryRecordBuilder();
  mgqrBuilder.add(indexName, typeName, null, recordKeyName);
  // ... (rest of the excerpt elided)
}

Code example source: com.hurence.logisland/logisland-common-processors-plugin

String defaultCollection = context.getPropertyValue(DEFAULT_COLLECTION).asString();
if (context.getPropertyValue(TIMEBASED_INDEX).isSet()) {
  final SimpleDateFormat sdf = new SimpleDateFormat(context.getPropertyValue(DATE_FORMAT).asString());
  if (context.getPropertyValue(TIMEBASED_INDEX).getRawValue().equals(TODAY_DATE_SUFFIX.getValue())) {
    defaultCollection += "." + sdf.format(new Date());
  } else if (context.getPropertyValue(TIMEBASED_INDEX).getRawValue().equals(YESTERDAY_DATE_SUFFIX.getValue())) {
    DateTime dt = new DateTime(new Date()).minusDays(1);
    defaultCollection += "." + sdf.format(dt.toDate());
  if (context.getPropertyValue(COLLECTION_FIELD).isSet()) {
    Field eventIndexField = record.getField(context.getPropertyValue(COLLECTION_FIELD).asString());
    if (eventIndexField != null && eventIndexField.getRawValue() != null) {
      collection = eventIndexField.getRawValue().toString();

Code example source: com.hurence.logisland/logisland-common-processors-plugin

@Override
public Collection<Record> process(ProcessContext context, Collection<Record> records) {
  final String[] keyFields = context.getPropertyValue(KEY_FIELDS).asString().split(",");
  final String keyRegexString = context.getPropertyValue(KEY_REGEX).asString();
  final Pattern keyRegex = Pattern.compile(keyRegexString);
  final String[] valueFields = context.getPropertyValue(VALUE_FIELDS).asString().split(",");
  final String valueRegexString = context.getPropertyValue(VALUE_REGEX).asString();
  final String eventType = context.getPropertyValue(RECORD_TYPE).asString();
  final boolean keepRawContent = context.getPropertyValue(KEEP_RAW_CONTENT).asBoolean();
  final Pattern valueRegex = Pattern.compile(valueRegexString);
      // ... (lines elided in this excerpt)
      if (context.getPropertyValue(TIME_ZONE_RECORD_TIME).isSet()) {
        timezone = TimeZone.getTimeZone(context.getPropertyValue(TIME_ZONE_RECORD_TIME).asString());
      } else {
        timezone = TimeZone.getTimeZone("UTC");
      }
      // ... (rest of the excerpt elided)
}

Code example source: com.hurence.logisland/logisland-solr_6_4

/**
 * Instantiate Chronix Client. This should be called by subclasses' @OnScheduled method to create a client
 * if one does not yet exist. If called when scheduled, closeClient() should be called by the subclasses' @OnStopped
 * method so the client will be destroyed when the processor is stopped.
 *
 * @param context The context for this processor
 * @throws ProcessException if an error occurs while creating a Chronix client
 */
protected void createSolrClient(ControllerServiceInitializationContext context) throws ProcessException {
  if (solr != null) {
    return;
  }
  // create a solr client
  final boolean isCloud = context.getPropertyValue(SOLR_CLOUD).asBoolean();
  final String connectionString = context.getPropertyValue(SOLR_CONNECTION_STRING).asString();
  final String collection = context.getPropertyValue(SOLR_COLLECTION).asString();
  if (isCloud) {
    //logInfo("creating solrCloudClient on $solrUrl for collection $collection");
    CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder().withZkHost(connectionString).build();
    cloudSolrClient.setDefaultCollection(collection);
    cloudSolrClient.setZkClientTimeout(30000);
    cloudSolrClient.setZkConnectTimeout(30000);
    solr = cloudSolrClient;
  } else {
    // logInfo(s"creating HttpSolrClient on $solrUrl for collection $collection")
    solr = new HttpSolrClient.Builder(connectionString + "/" + collection).build();
  }
}

Code example source: com.hurence.logisland/logisland-enrichment-plugin

ipToGeoService = context.getPropertyValue(IP_TO_GEO_SERVICE).asControllerService(IpToGeoService.class);
if (ipToGeoService == null) {
  logger.error("IpToGeoService service is not initialized!");
}
// ... (lines elided in this excerpt: the assignments below are non-contiguous lines
// from the same method, where propertyValue holds the PropertyValue fetched for the
// corresponding property before each call)
geoFields = propertyValue.asString();
hierarchical = propertyValue.asBoolean();
hierarchicalSuffix = propertyValue.asString();
flatSuffix = propertyValue.asString();
// ...
cacheService = context.getPropertyValue(CONFIG_CACHE_SERVICE).asControllerService(CacheService.class);
if (cacheService == null) {
  logger.error("Cache service is not initialized!");
}

Code example source: com.hurence.logisland/logisland-redis_4-client-service

public static JedisConnectionFactory createConnectionFactory(final ControllerServiceInitializationContext context) {
  final String redisMode = context.getPropertyValue(RedisUtils.REDIS_MODE).asString();
  final String connectionString = context.getPropertyValue(RedisUtils.CONNECTION_STRING).asString();
  final Integer dbIndex = context.getPropertyValue(RedisUtils.DATABASE).asInteger();
  final String password = context.getPropertyValue(RedisUtils.PASSWORD).asString();
  final Integer timeout = context.getPropertyValue(RedisUtils.COMMUNICATION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
  final JedisPoolConfig poolConfig = createJedisPoolConfig(context);
  // ... (lines elided in this excerpt)
  final String sentinelMaster = context.getPropertyValue(RedisUtils.SENTINEL_MASTER).asString();
  final RedisSentinelConfiguration sentinelConfiguration = new RedisSentinelConfiguration(sentinelMaster, new HashSet<>(getTrimmedValues(sentinels)));
  final JedisShardInfo jedisShardInfo = createJedisShardInfo(sentinels[0], timeout, password);
  // ... (lines elided in this excerpt)
  final Integer maxRedirects = context.getPropertyValue(RedisUtils.CLUSTER_MAX_REDIRECTS).asInteger();
  // ... (rest of the excerpt elided)
}

Code example source: com.hurence.logisland/logisland-common-processors-plugin

String excludesFieldName = context.getPropertyValue(EXCLUDES_FIELD).asString();
// ... (lines elided in this excerpt)
try {
  recordKeyName = context.getPropertyValue(RECORD_KEY_FIELD).evaluate(record).asString();
  indexName = context.getPropertyValue(COLLECTION_NAME).evaluate(record).asString();
  if (context.getPropertyValue(TYPE_NAME).isSet())
    typeName = context.getPropertyValue(TYPE_NAME).evaluate(record).asString();
  includesFieldName = context.getPropertyValue(INCLUDES_FIELD).evaluate(record).asString();
} catch (Throwable t) {
  record.setStringField(FieldDictionary.RECORD_ERRORS, "Failure in executing EL. Error: " + t.getMessage());
  // ... (rest of the excerpt elided)
}

Code example source: com.hurence.logisland/logisland-common-processors-plugin

private void updateRecord(ProcessContext context, Record record, Map<String, String> fieldsNameMapping) {
  String conflictPolicy = context.getPropertyValue(CONFLICT_RESOLUTION_POLICY).asString();
  if ((fieldsNameMapping == null) || (fieldsNameMapping.keySet() == null)) {
    return;
  }
  fieldsNameMapping.keySet().forEach(addedFieldName -> {
    final String defaultValueToAdd = context.getPropertyValue(addedFieldName).evaluate(record).asString();
    // field is already here
    if (record.hasField(addedFieldName)) {
      if (conflictPolicy.equals(OVERWRITE_EXISTING.getValue())) {
        overwriteObsoleteFieldValue(record, addedFieldName, defaultValueToAdd);
      }
    } else {
      record.setStringField(addedFieldName, defaultValueToAdd);
    }
  });
}
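Note how the value to add is read with evaluate(record) before asString(): a PropertyValue can be evaluated against a Record, so property values that contain expressions are resolved per record (the same pattern appears in other excerpts above). A minimal sketch of that idea, where MY_DYNAMIC_PROP is a hypothetical descriptor and the expression syntax depends on how the descriptor is declared:

// Sketch only: MY_DYNAMIC_PROP is a placeholder, not a descriptor from the quoted project.
for (Record record : records) {
  String resolved = context.getPropertyValue(MY_DYNAMIC_PROP)
      .evaluate(record)   // resolve the configured value against this record
      .asString();
  record.setStringField("resolved_value", resolved);
}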

Code example source: com.hurence.logisland/logisland-redis_4-client-service

private static JedisPoolConfig createJedisPoolConfig(final ControllerServiceInitializationContext context) {
  final JedisPoolConfig poolConfig = new JedisPoolConfig();
  poolConfig.setMaxTotal(context.getPropertyValue(RedisUtils.POOL_MAX_TOTAL).asInteger());
  poolConfig.setMaxIdle(context.getPropertyValue(RedisUtils.POOL_MAX_IDLE).asInteger());
  poolConfig.setMinIdle(context.getPropertyValue(RedisUtils.POOL_MIN_IDLE).asInteger());
  poolConfig.setBlockWhenExhausted(context.getPropertyValue(RedisUtils.POOL_BLOCK_WHEN_EXHAUSTED).asBoolean());
  poolConfig.setMaxWaitMillis(context.getPropertyValue(RedisUtils.POOL_MAX_WAIT_TIME).asTimePeriod(TimeUnit.MILLISECONDS));
  poolConfig.setMinEvictableIdleTimeMillis(context.getPropertyValue(RedisUtils.POOL_MIN_EVICTABLE_IDLE_TIME).asTimePeriod(TimeUnit.MILLISECONDS));
  poolConfig.setTimeBetweenEvictionRunsMillis(context.getPropertyValue(RedisUtils.POOL_TIME_BETWEEN_EVICTION_RUNS).asTimePeriod(TimeUnit.MILLISECONDS));
  poolConfig.setNumTestsPerEvictionRun(context.getPropertyValue(RedisUtils.POOL_NUM_TESTS_PER_EVICTION_RUN).asInteger());
  poolConfig.setTestOnCreate(context.getPropertyValue(RedisUtils.POOL_TEST_ON_CREATE).asBoolean());
  poolConfig.setTestOnBorrow(context.getPropertyValue(RedisUtils.POOL_TEST_ON_BORROW).asBoolean());
  poolConfig.setTestOnReturn(context.getPropertyValue(RedisUtils.POOL_TEST_ON_RETURN).asBoolean());
  poolConfig.setTestWhileIdle(context.getPropertyValue(RedisUtils.POOL_TEST_WHILE_IDLE).asBoolean());
  return poolConfig;
}

Code example source: com.hurence.logisland/logisland-elasticsearch_5_4_0-client-service

/**
 * set up BackoffPolicy
 */
private BackoffPolicy getBackOffPolicy(ControllerServiceInitializationContext context)
{
  BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
  if (context.getPropertyValue(BULK_BACK_OFF_POLICY).getRawValue().equals(DEFAULT_EXPONENTIAL_BACKOFF_POLICY.getValue())) {
    backoffPolicy = BackoffPolicy.exponentialBackoff();
  } else if (context.getPropertyValue(BULK_BACK_OFF_POLICY).getRawValue().equals(EXPONENTIAL_BACKOFF_POLICY.getValue())) {
    backoffPolicy = BackoffPolicy.exponentialBackoff(
        TimeValue.timeValueMillis(context.getPropertyValue(BULK_THROTTLING_DELAY).asLong()),
        context.getPropertyValue(BULK_RETRY_NUMBER).asInteger()
    );
  } else if (context.getPropertyValue(BULK_BACK_OFF_POLICY).getRawValue().equals(CONSTANT_BACKOFF_POLICY.getValue())) {
    backoffPolicy = BackoffPolicy.constantBackoff(
        TimeValue.timeValueMillis(context.getPropertyValue(BULK_THROTTLING_DELAY).asLong()),
        context.getPropertyValue(BULK_RETRY_NUMBER).asInteger()
    );
  } else if (context.getPropertyValue(BULK_BACK_OFF_POLICY).getRawValue().equals(NO_BACKOFF_POLICY.getValue())) {
    backoffPolicy = BackoffPolicy.noBackoff();
  }
  return backoffPolicy;
}

Code example source: com.hurence.logisland/logisland-solr_6_4

protected void createChronixStorage(ControllerServiceInitializationContext context) throws ProcessException {
  if (updater != null) {
    return;
  }
  // setup a thread pool of solr updaters
  int batchSize = context.getPropertyValue(BATCH_SIZE).asInteger();
  long flushInterval = context.getPropertyValue(FLUSH_INTERVAL).asLong();
  updater = new ChronixUpdater(solr, queue, createMetricsTypeMapping(context), batchSize, flushInterval);
  executorService.execute(updater);
}

Code example source: com.hurence.logisland/logisland-cache_key_value-service-api

protected Cache<K,V> createCache(final ControllerServiceInitializationContext context) throws IOException, InterruptedException {
  final int capacity = context.getPropertyValue(CACHE_SIZE).asInteger();
  return new LRUCache<K,V>(capacity);
}
