Usage and code examples of the org.slf4j.Logger.debug() method


This article collects code examples of the org.slf4j.Logger.debug() method in Java and shows how Logger.debug() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Logger.debug() method are as follows:
Package path: org.slf4j.Logger
Class name: Logger
Method name: debug

About Logger.debug

Log a message at the DEBUG level.
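Before the project examples, here is a minimal, self-contained sketch of the Logger.debug patterns that recur in them: parameterized messages, an isDebugEnabled() guard, and passing a Throwable. The class name, the sample messages, and the buildExpensiveReport() helper are hypothetical and used only for illustration.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DebugLogExample {

  // Hypothetical example class; the logger is obtained once per class.
  private static final Logger LOG = LoggerFactory.getLogger(DebugLogExample.class);

  public static void main(String[] args) {
    String user = "alice";

    // Parameterized message: '{}' placeholders avoid string concatenation
    // when the DEBUG level is disabled.
    LOG.debug("Loaded profile for user '{}'", user);

    // Explicit guard for arguments that are expensive to compute.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Session report: {}", buildExpensiveReport());
    }

    // A Throwable passed as the last argument is logged with its stack trace.
    try {
      throw new IllegalStateException("example failure");
    } catch (IllegalStateException e) {
      LOG.debug("Operation failed", e);
    }
  }

  // Hypothetical helper standing in for an expensive computation.
  private static String buildExpensiveReport() {
    return "...";
  }
}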

Code examples

Code example from: spring-projects/spring-framework

public void debug(Object message) {
  if (message instanceof String || this.logger.isDebugEnabled()) {
    this.logger.debug(String.valueOf(message));
  }
}

Code example from: skylot/jadx

public static void setClipboardString(String text) {
    try {
      Clipboard clipboard = Toolkit.getDefaultToolkit().getSystemClipboard();
      Transferable transferable = new StringSelection(text);
      clipboard.setContents(transferable, null);
      LOG.debug("String '{}' copied to clipboard", text);
    } catch (Exception e) {
      LOG.error("Failed copy string '{}' to clipboard", text, e);
    }
  }

Code example from: skylot/jadx

public void printMissingClasses() {
    int count = missingClasses.size();
    if (count == 0) {
      return;
    }
    LOG.warn("Found {} references to unknown classes", count);
    if (LOG.isDebugEnabled()) {
      List<String> clsNames = new ArrayList<>(missingClasses);
      Collections.sort(clsNames);
      for (String cls : clsNames) {
        LOG.debug("  {}", cls);
      }
    }
  }

Code example from: apache/hbase

public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
 try (Table table = connection.getTable(bulkLoadTableName)) {
  List<Delete> lstDels = new ArrayList<>();
  for (byte[] row : rows) {
   Delete del = new Delete(row);
   lstDels.add(del);
   LOG.debug("orig deleting the row: " + Bytes.toString(row));
  }
  table.delete(lstDels);
  LOG.debug("deleted " + rows.size() + " original bulkload rows");
 }
}

Code example from: perwendel/spark

private void add(HttpMethod method, String url, String acceptedType, Object target) {
  RouteEntry entry = new RouteEntry();
  entry.httpMethod = method;
  entry.path = url;
  entry.target = target;
  entry.acceptedType = acceptedType;
  LOG.debug("Adds route: " + entry);
  // Adds to end of list
  routes.add(entry);
}

Code example from: apache/hbase

private void updateFileLists(List<String> activeFiles, List<String> archiveFiles)
  throws IOException {
 List<String> newlyArchived = new ArrayList<>();
 for (String spath : activeFiles) {
  if (!fs.exists(new Path(spath))) {
   newlyArchived.add(spath);
  }
 }
 if (newlyArchived.size() > 0) {
  activeFiles.removeAll(newlyArchived);
  archiveFiles.addAll(newlyArchived);
 }
 LOG.debug(newlyArchived.size() + " files have been archived.");
}

Code example from: apache/storm

public List<HBMessage> sendAll(HBMessage m) throws PacemakerConnectionException, InterruptedException {
  List<HBMessage> responses = new ArrayList<HBMessage>();
  LOG.debug("Using servers: {}", servers);
  for (String s : servers) {
    try {
      HBMessage response = getClientForServer(s).send(m);
      responses.add(response);
    } catch (PacemakerConnectionException e) {
      LOG.warn("Failed to connect to the pacemaker server {}, attempting to reconnect", s);
      getClientForServer(s).reconnect();
    }
  }
  if (responses.size() == 0) {
    throw new PacemakerConnectionException("Failed to connect to any Pacemaker.");
  }
  return responses;
}

Code example from: apache/flink

@Override
  public void log (int level, String category, String message, Throwable ex) {
    final String logString = "[KRYO " + category + "] " + message;
    switch (level) {
      case Log.LEVEL_ERROR:
        log.error(logString, ex);
        break;
      case Log.LEVEL_WARN:
        log.warn(logString, ex);
        break;
      case Log.LEVEL_INFO:
        log.info(logString, ex);
        break;
      case Log.LEVEL_DEBUG:
        log.debug(logString, ex);
        break;
      case Log.LEVEL_TRACE:
        log.trace(logString, ex);
        break;
    }
  }

Code example from: Activiti/Activiti

protected void logException() {
  if (exception instanceof JobNotFoundException || exception instanceof ActivitiTaskAlreadyClaimedException) {
    // reduce log level, because this may have been caused because of job deletion due to cancelActiviti="true"
    log.info("Error while closing command context",
         exception);
  } else if (exception instanceof ActivitiOptimisticLockingException) {
    // reduce log level, as normally we're not interested in logging this exception
    log.debug("Optimistic locking exception : " + exception);
  } else {
    log.error("Error while closing command context",
         exception);
  }
}

Code example from: Netflix/eureka

private List<AwsEndpoint> getClusterEndpointsFromConfig() {
  String[] availZones = clientConfig.getAvailabilityZones(clientConfig.getRegion());
  String myZone = InstanceInfo.getZone(availZones, myInstanceInfo);
  Map<String, List<String>> serviceUrls = EndpointUtils
      .getServiceUrlsMapFromConfig(clientConfig, myZone, clientConfig.shouldPreferSameZoneEureka());
  List<AwsEndpoint> endpoints = new ArrayList<>();
  for (String zone : serviceUrls.keySet()) {
    for (String url : serviceUrls.get(zone)) {
      try {
        endpoints.add(new AwsEndpoint(url, getRegion(), zone));
      } catch (Exception ignore) {
        logger.warn("Invalid eureka server URI: {}; removing from the server pool", url);
      }
    }
  }
  logger.debug("Config resolved to {}", endpoints);
  if (endpoints.isEmpty()) {
    logger.error("Cannot resolve to any endpoints from provided configuration: {}", serviceUrls);
  }
  return endpoints;
}

Code example from: alibaba/fescar

@Override
public void onCheckMessage(long msgId, ChannelHandlerContext ctx, ServerMessageSender sender) {
  try {
    sender.sendResponse(msgId, ctx.channel(), HeartbeatMessage.PONG);
  } catch (Throwable throwable) {
    LOGGER.error("", "send response error", throwable);
  }
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("received PING from " + ctx.channel().remoteAddress());
  }
}

Code example from: Graylog2/graylog2-server

@Override
public ResourceModel processResourceModel(ResourceModel resourceModel, Configuration configuration) {
  LOG.debug("Map for resource model <" + resourceModel + ">:");
  final List<Resource> resources = new ArrayList<>();
  for (Resource resource : resourceModel.getResources()) {
    resources.add(resource);
    resources.addAll(findChildResources(resource));
  }
  logResources(resources);
  return resourceModel;
}

Code example from: apache/flink

@Override
public void initializeState(StateInitializationContext context) throws Exception {
  super.initializeState(context);
  checkState(checkpointedState == null, "The reader state has already been initialized.");
  checkpointedState = context.getOperatorStateStore().getSerializableListState("splits");
  int subtaskIdx = getRuntimeContext().getIndexOfThisSubtask();
  if (context.isRestored()) {
    LOG.info("Restoring state for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);
    // this may not be null in case we migrate from a previous Flink version.
    if (restoredReaderState == null) {
      restoredReaderState = new ArrayList<>();
      for (TimestampedFileInputSplit split : checkpointedState.get()) {
        restoredReaderState.add(split);
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("{} (taskIdx={}) restored {}.", getClass().getSimpleName(), subtaskIdx, restoredReaderState);
      }
    }
  } else {
    LOG.info("No state to restore for the {} (taskIdx={}).", getClass().getSimpleName(), subtaskIdx);
  }
}

Code example from: apache/hive

static void logException(String msg, Exception e) {
 if (LOG.isDebugEnabled()) {
  LOG.debug(msg, e);
 } else {
  LOG.info(msg + ": " + e.getMessage());
 }
}

Code example from: skylot/jadx

private static Release checkForNewRelease() throws IOException {
  String version = JadxDecompiler.getVersion();
  if (version.contains("dev")) {
    LOG.debug("Ignore check for update: development version");
    return null;
  }
  List<Release> list = get(GITHUB_RELEASES_URL, RELEASES_LIST_TYPE);
  if (list == null) {
    return null;
  }
  list.removeIf(release -> release.getName().equalsIgnoreCase(version) || release.isPreRelease());
  if (list.isEmpty()) {
    return null;
  }
  list.sort(RELEASE_COMPARATOR);
  Release latest = list.get(list.size() - 1);
  if (VersionComparator.checkAndCompare(version, latest.getName()) >= 0) {
    return null;
  }
  LOG.info("Found new jadx version: {}", latest);
  return latest;
}

Code example from: apache/zookeeper

@Override
  public synchronized void shutdown() {
    if (!canShutdown()) {
      LOG.debug("ZooKeeper server is not running, so not proceeding to shutdown!");
      return;
    }
    LOG.info("Shutting down");
    try {
      super.shutdown();
    } catch (Exception e) {
      LOG.warn("Ignoring unexpected exception during shutdown", e);
    }
    try {
      if (syncProcessor != null) {
        syncProcessor.shutdown();
      }
    } catch (Exception e) {
      LOG.warn("Ignoring unexpected exception in syncprocessor shutdown",
          e);
    }
  }

Code example from: gocd/gocd

protected boolean runImpl() {
  try {
    Message message = consumer.receive();
    if (message == null) {
      LOG.debug("Message consumer was closed.");
      return true;
    }
    ObjectMessage omessage = (ObjectMessage) message;
    daemonThreadStatsCollector.captureStats(thread.getId());
    listener.onMessage((GoMessage) omessage.getObject());
  } catch (JMSException e) {
    LOG.warn("Error receiving message. Message receiving will continue despite this error.", e);
  } catch (Exception e) {
    LOG.error("Exception thrown in message handling by listener {}", listener, e);
  } finally {
    daemonThreadStatsCollector.clearStats(thread.getId());
  }
  return false;
}

Code example from: apache/kafka

private boolean threadShouldExit(long now, long curHardShutdownTimeMs) {
  if (!hasActiveExternalCalls()) {
    log.trace("All work has been completed, and the I/O thread is now exiting.");
    return true;
  }
  if (now >= curHardShutdownTimeMs) {
    log.info("Forcing a hard I/O thread shutdown. Requests in progress will be aborted.");
    return true;
  }
  log.debug("Hard shutdown in {} ms.", curHardShutdownTimeMs - now);
  return false;
}

Code example from: apache/kafka

@Override
  public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
    if (exception != null) {
      if (exception instanceof RetriableException) {
        log.debug("Asynchronous auto-commit of offsets {} failed due to retriable error: {}", offsets,
            exception);
        nextAutoCommitTimer.updateAndReset(retryBackoffMs);
      } else {
        log.warn("Asynchronous auto-commit of offsets {} failed: {}", offsets, exception.getMessage());
      }
    } else {
      log.debug("Completed asynchronous auto-commit of offsets {}", offsets);
    }
  }

Code example from: apache/geode

@Override
protected void rebalanceCache() {
 try {
  getLogger().info("Rebalancing: " + this.cache);
  RebalanceResults results = RegionHelper.rebalanceCache(this.cache);
  if (getLogger().isDebugEnabled()) {
   getLogger().debug("Done rebalancing: " + this.cache);
   getLogger().debug(RegionHelper.getRebalanceResultsMessage(results));
  }
 } catch (Exception e) {
  getLogger().warn("Rebalance failed because of the following exception:", e);
 }
}
