org.slf4j.Logger.isInfoEnabled(): usage and code examples


This article collects code examples of the Java method org.slf4j.Logger.isInfoEnabled(), showing how Logger.isInfoEnabled() is used in practice. The examples are taken from selected open-source projects hosted on GitHub and distributed via Maven, and should serve as useful references. The method's details are as follows:
Package: org.slf4j
Class: Logger
Method: isInfoEnabled

About Logger.isInfoEnabled

Is the logger instance enabled for the INFO level? The method returns true if the logger is enabled for INFO and false otherwise.
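
A typical reason to call isInfoEnabled() is to skip expensive message construction when INFO logging is disabled. The following minimal sketch illustrates the idiom; the class, method, and variable names are illustrative only, not taken from the projects quoted below. It also shows SLF4J's parameterized form, which usually makes an explicit guard unnecessary for cheap arguments:

import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingExample {

    // Illustrative class/logger, not from any of the quoted projects.
    private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingExample.class);

    void report(List<String> items) {
        // The guard ensures the concatenation and String.join() only run
        // when INFO is actually enabled.
        if (LOG.isInfoEnabled()) {
            LOG.info("Processing " + items.size() + " items: " + String.join(", ", items));
        }

        // With the parameterized form, SLF4J formats the message only if
        // INFO is enabled, so no guard is needed here.
        LOG.info("Processed {} items", items.size());
    }
}

Most of the examples below follow the first pattern, guarding string concatenation or StringBuilder work; several others use isInfoEnabled() to adapt a numeric or enum log level to the SLF4J API.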

Code examples

Example source: spring-projects/spring-framework

public void info(Object message, Throwable exception) {
  // A plain String is logged as-is; other message types are converted
  // with String.valueOf() only when INFO is actually enabled.
  if (message instanceof String || this.logger.isInfoEnabled()) {
    this.logger.info(String.valueOf(message), exception);
  }
}

Example source: com.h2database/h2

@Override
public boolean isEnabled(int level) {
  switch (level) {
  case TraceSystem.DEBUG:
    return logger.isDebugEnabled();
  case TraceSystem.INFO:
    return logger.isInfoEnabled();
  case TraceSystem.ERROR:
    return logger.isErrorEnabled();
  default:
    return false;
  }
}

Example source: oblac/jodd

@Override
public boolean isEnabled(final Level level) {
  switch (level) {
    case TRACE: return logger.isTraceEnabled();
    case DEBUG: return logger.isDebugEnabled();
    case INFO: return logger.isInfoEnabled();
    case WARN: return logger.isWarnEnabled();
    case ERROR: return logger.isErrorEnabled();
    default:
      throw new IllegalArgumentException();
  }
}

Example source: eclipse-vertx/vert.x

Excerpt from Vert.x's SLF4J log delegate; the snippet was truncated, so the enclosing if/else and switch shown here are a reconstruction (FQCN, msg, parameters, and t come from the surrounding method):

if (logger instanceof LocationAwareLogger &&
    (level == TRACE_INT && logger.isTraceEnabled() ||
     level == DEBUG_INT && logger.isDebugEnabled() ||
     level == INFO_INT && logger.isInfoEnabled() ||
     level == WARN_INT && logger.isWarnEnabled() ||
     level == ERROR_INT && logger.isErrorEnabled())) {
  // A LocationAwareLogger can record the actual calling class and line.
  LocationAwareLogger l = (LocationAwareLogger) logger;
  l.log(null, FQCN, level, msg, parameters, t);
} else {
  switch (level) {
  case TRACE_INT:
    logger.trace(msg, parameters);
    break;
  case DEBUG_INT:
    logger.debug(msg, parameters);
    break;
  case INFO_INT:
    logger.info(msg, parameters);
    break;
  case WARN_INT:
    logger.warn(msg, parameters);
    break;
  case ERROR_INT:
    logger.error(msg, parameters);
    break;
  default:
    break;
  }
}

Example source: alibaba/fescar

@Override
  public void run() {
    while (true) {
      if (messageStrings.size() > 0) {
        StringBuilder builder = new StringBuilder();
        while (!messageStrings.isEmpty()) {
          builder.append(messageStrings.poll()).append(BATCH_LOG_SPLIT);
        }
        if (LOGGER.isInfoEnabled()) {
          LOGGER.info(builder.toString());
        }
      }
      try {
        Thread.sleep(IDLE_CHECK_MILLS);
      } catch (InterruptedException exx) {
        LOGGER.error(exx.getMessage());
      }
    }
  }
}

Example source: apache/drill

private void logConfigurations(JobConf localJobConf) {
  if (LOG.isInfoEnabled()) {
   LOG.info("Logging job configuration: ");
   StringWriter outWriter = new StringWriter();
   try {
    Configuration.dumpConfiguration(localJobConf, outWriter);
   } catch (IOException e) {
    LOG.warn("Error logging job configuration", e);
   }
   LOG.info(outWriter.toString());
  }
 }
}

Example source: apache/hive

private boolean handleScheduleAttemptedRejection(TaskWrapper rejected) {
 // TODO: is this check even needed given what the caller checks?
 if (!enablePreemption || preemptionQueue.isEmpty()) {
  return false;
 }
 LOG.debug("Preemption Queue: {}", preemptionQueue);
 // This call checks under lock if we can actually preempt the task.
 // It is possible to have a race where the update (that's also under lock) makes the
 // task finishable or guaranteed between the remove and kill, but it's the same timing
 // issue as would happen if there was a tiny delay on the network, so we don't care.
 TaskWrapper victim = getSuitableVictimFromPreemptionQueue(rejected);
 if (victim == null) {
  return false; // Woe us.
 }
 if (LOG.isInfoEnabled()) {
  LOG.info("Invoking kill task for {} due to pre-emption to run {}",
    victim.getRequestId(), rejected.getRequestId());
 }
 // The task will either be killed or is already in the process of completing, which will
 // trigger the next scheduling run, or result in available slots being higher than 0,
 // which will cause the scheduler loop to continue.
 victim.getTaskRunnerCallable().killTask();
 // We've killed something and may want to wait for it to die.
 return true;
}

Example source: apache/flink

@Override
public void close() throws IOException {
  if (this.invalidLineCount > 0) {
    if (LOG.isWarnEnabled()) {
      LOG.warn("In file \"" + currentSplit.getPath() + "\" (split start: " + this.splitStart + ") " + this.invalidLineCount +" invalid line(s) were skipped.");
    }
  }
  if (this.commentCount > 0) {
    if (LOG.isInfoEnabled()) {
      LOG.info("In file \"" + currentSplit.getPath() + "\" (split start: " + this.splitStart + ") " + this.commentCount +" comment line(s) were skipped.");
    }
  }
  super.close();
}

Example source: apache/hive

@Override
public void cleanUpInputFileChangedOp() throws HiveException {
 super.cleanUpInputFileChangedOp();
 Path fpath = getExecContext().getCurrentInputPath();
 Path nominalPath = getNominalPath(fpath);
 Map<Operator<?>, MapOpCtx> contexts = opCtxMap.get(nominalPath);
 if (LOG.isInfoEnabled()) {
  StringBuilder builder = new StringBuilder();
  for (MapOpCtx context : contexts.values()) {
   if (builder.length() > 0) {
    builder.append(", ");
   }
   builder.append(context.alias);
  }
  if (LOG.isDebugEnabled()) {
   LOG.debug("Processing alias(es) " + builder.toString() + " for file " + fpath);
  }
 }
 // Add alias, table name, and partitions to hadoop conf so that their
 // children will inherit these
 for (Entry<Operator<?>, MapOpCtx> entry : contexts.entrySet()) {
  Operator<?> operator = entry.getKey();
  MapOpCtx context = entry.getValue();
  operator.setInputContext(context.tableName, context.partName);
 }
 currentCtxs = contexts.values().toArray(new MapOpCtx[contexts.size()]);
}

Example source: apache/hive

Excerpt from the RPC channel setup; the snippet was truncated, so the enclosing try block and the assignments in the ERROR and TRACE branches are a reconstruction (config, LOG, and LogLevel come from the surrounding class):

LogLevel logLevel = LogLevel.DEBUG;  // assumed fallback; the original default is not visible in the excerpt
try {
  logLevel = LogLevel.valueOf(config.getRpcChannelLogLevel());
} catch (Exception e) {
  LOG.warn("Invalid log level {}, reverting to default.", config.getRpcChannelLogLevel());
}
boolean logEnabled = false;
switch (logLevel) {
case DEBUG:
  logEnabled = LOG.isDebugEnabled();
  break;
case ERROR:
  logEnabled = LOG.isErrorEnabled();
  break;
case INFO:
  logEnabled = LOG.isInfoEnabled();
  break;
case TRACE:
  logEnabled = LOG.isTraceEnabled();
  break;
case WARN:
  logEnabled = LOG.isWarnEnabled();
  break;
}

Example source: ethereum/ethereumj

private void processDisconnect(ChannelHandlerContext ctx, DisconnectMessage msg) {
  if (logger.isInfoEnabled() && msg.getReason() == ReasonCode.USELESS_PEER) {
    if (channel.getNodeStatistics().ethInbound.get() - ethInbound > 1 ||
        channel.getNodeStatistics().ethOutbound.get() - ethOutbound > 1) {
      // it means that we've been disconnected
      // after some incorrect action from our peer
      // need to log this moment
      logger.debug("From: \t{}\t [DISCONNECT reason=BAD_PEER_ACTION]", channel);
    }
  }
  ctx.close();
  killTimers();
}

Example source: networknt/light-4j

private List<URL> nodeChildsToUrls(String parentPath, List<String> currentChilds) {
  List<URL> urls = new ArrayList<URL>();
  if (currentChilds != null) {
    for (String node : currentChilds) {
      String nodePath = parentPath + Constants.PATH_SEPARATOR + node;
      String data = client.readData(nodePath, true);
      try {
        URL url = URLImpl.valueOf(data);
        urls.add(url);
      } catch (Exception e) {
        if (logger.isInfoEnabled()) {
          logger.warn(String.format("Found malformed urls from ZooKeeperRegistry, path=%s", nodePath), e);
        }
      }
    }
  }
  return urls;
}

Example source: Netflix/eureka

public ReloadingClusterResolver(final ClusterResolverFactory<T> factory, final long reloadIntervalMs) {
  this.factory = factory;
  this.reloadIntervalMs = reloadIntervalMs;
  this.maxReloadIntervalMs = MAX_RELOAD_INTERVAL_MULTIPLIER * reloadIntervalMs;
  this.delegateRef = new AtomicReference<>(factory.createClusterResolver());
  this.lastUpdateTime = System.currentTimeMillis();
  this.currentReloadIntervalMs = reloadIntervalMs;
  List<T> clusterEndpoints = delegateRef.get().getClusterEndpoints();
  if (clusterEndpoints.isEmpty()) {
    logger.error("Empty Eureka server endpoint list during initialization process");
    throw new ClusterResolverException("Resolved to an empty endpoint list");
  }
  if (logger.isInfoEnabled()) {
    logger.info("Initiated with delegate resolver of type {}; next reload in {}[sec]. Loaded endpoints={}",
        delegateRef.get().getClass(), currentReloadIntervalMs / 1000, clusterEndpoints);
  }
  try {
    Monitors.registerObject(this);
  } catch (Throwable e) {
    logger.warn("Cannot register metrics", e);
  }
}

Example source: alibaba/fescar

@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
  LOGGER.error(FrameworkErrorCode.ExceptionCaught.errCode,
    NetUtil.toStringAddress(ctx.channel().remoteAddress()) + "connect exception. " + cause.getMessage(),
    cause);
  releaseChannel(ctx.channel(), getAddressFromChannel(ctx.channel()));
  if (LOGGER.isInfoEnabled()) {
    LOGGER.info("remove exception rm channel:" + ctx.channel());
  }
  super.exceptionCaught(ctx, cause);
}

Example source: searchbox-io/Jest

protected void removeNodeAndUpdateServers(final String hostToRemove) {
  log.warn("Removing host {}", hostToRemove);
  discoveredServerList.remove(hostToRemove);
  if (log.isInfoEnabled()) {
    log.info("Discovered server pool is now: {}", Joiner.on(',').join(discoveredServerList));
  }
    if (!discoveredServerList.isEmpty()) {
        client.setServers(discoveredServerList);
    } else {
        client.setServers(bootstrapServerList);
    }
}

Example source: spring-projects/spring-framework

public void info(Object message) {
  if (message instanceof String || this.logger.isInfoEnabled()) {
    this.logger.info(String.valueOf(message));
  }
}

Example source: apache/hive

static void setSearchArgument(Reader.Options options,
               List<OrcProto.Type> types,
               Configuration conf,
               boolean isOriginal) {
 String neededColumnNames = getNeededColumnNamesString(conf);
 if (neededColumnNames == null) {
  LOG.debug("No ORC pushdown predicate - no column names");
  options.searchArgument(null, null);
  return;
 }
 SearchArgument sarg = ConvertAstToSearchArg.createFromConf(conf);
 if (sarg == null) {
  LOG.debug("No ORC pushdown predicate");
  options.searchArgument(null, null);
  return;
 }
 if (LOG.isInfoEnabled()) {
  LOG.info("ORC pushdown predicate: " + sarg);
 }
 options.searchArgument(sarg, getSargColumnNames(
   neededColumnNames.split(","), types, options.getInclude(), isOriginal));
}

Example source: apache/flink

Excerpt from the Optimizer's input-size estimation; the snippet was truncated, so the enclosing try/catch blocks are a reconstruction, with elided statements marked "// ..." (bs is the BaseStatistics obtained from the input format):

try {
  // ... instantiate the InputFormat ...
} catch (Throwable t) {
  if (Optimizer.LOG.isWarnEnabled()) {
    Optimizer.LOG.warn("Could not instantiate InputFormat to obtain statistics."
      + " Limited statistics will be available.", t);
  }
}
try {
  // ... obtain BaseStatistics bs from the input format ...
} catch (Throwable t) {
  if (Optimizer.LOG.isWarnEnabled()) {
    Optimizer.LOG.warn("Error obtaining statistics from input format: " + t.getMessage(), t);
  }
}
final long len = bs.getTotalInputSize();
if (len == BaseStatistics.SIZE_UNKNOWN) {
  if (Optimizer.LOG.isInfoEnabled()) {
    Optimizer.LOG.info("Compiler could not determine the size of input '" + inFormatDescription + "'. Using default estimates.");
  }
}

Example source: org.apache.hadoop/hadoop-common

@Override
public boolean isEnabled(int level) {
 switch (level) {
 case com.jcraft.jsch.Logger.DEBUG:
  return LOG.isDebugEnabled();
 case com.jcraft.jsch.Logger.INFO:
  return LOG.isInfoEnabled();
 case com.jcraft.jsch.Logger.WARN:
  return LOG.isWarnEnabled();
 case com.jcraft.jsch.Logger.ERROR:
 case com.jcraft.jsch.Logger.FATAL:
  return LOG.isErrorEnabled();
 default:
  return false;
 }
}

Example source: apache/hive

private static boolean checkInputFormatForLlapEncode(Configuration conf, String ifName) {
 String formatList = HiveConf.getVar(conf, ConfVars.LLAP_IO_ENCODE_FORMATS);
 if (LOG.isDebugEnabled()) {
  LOG.debug("Checking " + ifName + " against " + formatList);
 }
 String[] formats = StringUtils.getStrings(formatList);
 if (formats != null) {
  for (String format : formats) {
   // TODO: should we check isAssignableFrom?
   if (ifName.equals(format)) {
    if (LOG.isInfoEnabled()) {
     LOG.info("Using SerDe-based LLAP reader for " + ifName);
    }
    return true;
   }
  }
 }
 return false;
}
