本文整理了Java中org.apache.hadoop.metrics2.lib.MetricsRegistry.newQuantiles()
方法的一些代码示例,展示了MetricsRegistry.newQuantiles()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。MetricsRegistry.newQuantiles()
方法的具体详情如下:
包路径:org.apache.hadoop.metrics2.lib.MetricsRegistry
类名称:MetricsRegistry
方法名:newQuantiles
[英]Create a mutable metric that estimates quantiles of a stream of values
[中]创建一个可变度量来估计值流的分位数
代码示例来源:origin: org.apache.hadoop/hadoop-common
public ReadWriteDiskValidatorMetrics() {
registry = new MetricsRegistry(RECORD_INFO);
fileReadQuantiles = new MutableQuantiles[quantileIntervals.length];
for (int i = 0; i < fileReadQuantiles.length; i++) {
int interval = quantileIntervals[i];
fileReadQuantiles[i] = registry.newQuantiles(
"readLatency" + interval + "s",
"File read latency", "Ops", "latencyMicros", interval);
}
fileWriteQuantiles = new MutableQuantiles[quantileIntervals.length];
for (int i = 0; i < fileWriteQuantiles.length; i++) {
int interval = quantileIntervals[i];
fileWriteQuantiles[i] = registry.newQuantiles(
"writeLatency" + interval + "s",
"File write latency", "Ops", "latencyMicros", interval);
}
}
代码示例来源:origin: apache/kylin
/**
 * Creates per-interval quantile metrics and cumulative rates for query
 * execution.
 *
 * @param intervals rollover windows (in seconds) for the quantile metrics;
 *                  one metric of each kind is registered per window
 */
public QueryMetrics(int[] intervals) {
    queryLatencyTimeMillisQuantiles = new MutableQuantiles[intervals.length];
    scanRowCountQuantiles = new MutableQuantiles[intervals.length];
    resultRowCountQuantiles = new MutableQuantiles[intervals.length];
    cacheHitCountQuantiles = new MutableQuantiles[intervals.length];
    for (int i = 0; i < intervals.length; i++) {
        int interval = intervals[i];
        queryLatencyTimeMillisQuantiles[i] = registry.newQuantiles("QueryLatency" + interval + "s", "Query queue time in milliseconds", "ops", "", interval);
        // Fix copy-pasted descriptions: the three metrics below are counts,
        // not durations, so "in milli second" was incorrect.
        scanRowCountQuantiles[i] = registry.newQuantiles("ScanRowCount" + interval + "s", "Scan row count", "ops", "", interval);
        resultRowCountQuantiles[i] = registry.newQuantiles("ResultRowCount" + interval + "s", "Result row count", "ops", "", interval);
        cacheHitCountQuantiles[i] = registry.newQuantiles("CacheHitCount" + interval + "s", "Cache hit count", "ops", "", interval);
    }
    queryLatency = registry.newRate("QueryLatency", "", true);
    scanRowCount = registry.newRate("ScanRowCount", "", true);
    resultRowCount = registry.newRate("ResultRowCount", "", true);
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
for (int i = 0; i < intervals.length; i++) {
int interval = intervals[i];
rpcQueueTimeMillisQuantiles[i] = registry.newQuantiles("rpcQueueTime"
+ interval + "s", "rpc queue time in milli second", "ops",
"latency", interval);
rpcProcessingTimeMillisQuantiles[i] = registry.newQuantiles(
"rpcProcessingTime" + interval + "s",
"rpc processing time in milli second", "ops", "latency", interval);
deferredRpcProcessingTimeMillisQuantiles[i] = registry
.newQuantiles("deferredRpcProcessingTime" + interval + "s",
"deferred rpc processing time in milli seconds", "ops",
"latency", interval);
代码示例来源:origin: apache/hive
/**
 * Wires up the LLAP daemon IO registry and one decoding-time quantile
 * per requested interval. A null {@code intervals} array disables
 * percentile tracking entirely.
 */
private LlapDaemonIOMetrics(String displayName, String sessionId, int[] intervals) {
  this.name = displayName;
  this.sessionId = sessionId;
  this.registry = new MetricsRegistry("LlapDaemonIORegistry");
  this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
  final int numIntervals = (intervals == null) ? 0 : intervals.length;
  this.decodingTimes = new MutableQuantiles[numIntervals];
  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    LOG.info("Created interval " + LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + window + "s");
    decodingTimes[idx] = registry.newQuantiles(
        LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + window + "s",
        LlapDaemonIOInfo.PercentileDecodingTime.description(),
        "ops", "latency", window);
  }
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
MutableQuantiles[] getGroupsQuantiles = new MutableQuantiles[length];
for (int i = 0; i < length; i++) {
getGroupsQuantiles[i] = metrics.registry.newQuantiles(
"getGroups" + intervals[i] + "s",
"Get groups", "ops", "latency", intervals[i]);
代码示例来源:origin: apache/hive
for (int i=0; i<len; i++) {
int interval = intervals[i];
percentileTimeToKill[i] = registry.newQuantiles(
LlapDaemonExecutorInfo.ExecutorMaxPreemptionTimeToKill.name() + "_" + interval + "s",
LlapDaemonExecutorInfo.ExecutorMaxPreemptionTimeToKill.description(),
"ops", "latency", interval);
percentileTimeLost[i] = registry.newQuantiles(
LlapDaemonExecutorInfo.ExecutorMaxPreemptionTimeLost.name() + "_" + interval + "s",
LlapDaemonExecutorInfo.ExecutorMaxPreemptionTimeLost.description(),
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
JournalMetrics(Journal journal) {
this.journal = journal;
syncsQuantiles = new MutableQuantiles[QUANTILE_INTERVALS.length];
for (int i = 0; i < syncsQuantiles.length; i++) {
int interval = QUANTILE_INTERVALS[i];
syncsQuantiles[i] = registry.newQuantiles(
"syncs" + interval + "s",
"Journal sync time", "ops", "latencyMicros", interval);
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
for (int i = 0; i < len; i++) {
int interval = intervals[i];
metadataOperationLatencyQuantiles[i] = registry.newQuantiles(
"metadataOperationLatency" + interval + "s",
"Meatadata Operation Latency in ms", "ops", "latency", interval);
dataFileIoLatencyQuantiles[i] = registry.newQuantiles(
"dataFileIoLatency" + interval + "s",
"Data File Io Latency in ms", "ops", "latency", interval);
flushIoLatencyQuantiles[i] = registry.newQuantiles(
"flushIoLatency" + interval + "s",
"Data flush Io Latency in ms", "ops", "latency", interval);
syncIoLatencyQuantiles[i] = registry.newQuantiles(
"syncIoLatency" + interval + "s",
"Data sync Io Latency in ms", "ops", "latency", interval);
readIoLatencyQuantiles[i] = registry.newQuantiles(
"readIoLatency" + interval + "s",
"Data read Io Latency in ms", "ops", "latency", interval);
writeIoLatencyQuantiles[i] = registry.newQuantiles(
"writeIoLatency" + interval + "s",
"Data write Io Latency in ms", "ops", "latency", interval);
代码示例来源:origin: org.apache.hadoop/hadoop-common
return registry.newQuantiles(info.name(), annotation.about(),
annotation.sampleName(), annotation.valueName(), annotation.interval());
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
syncsQuantiles[i] = registry.newQuantiles(
"syncs" + interval + "s",
"Journal syncs", "ops", "latency", interval);
numTransactionsBatchedInSync[i] = registry.newQuantiles(
"numTransactionsBatchedInSync" + interval + "s",
"Number of Transactions batched in sync", "ops",
"count", interval);
storageBlockReportQuantiles[i] = registry.newQuantiles(
"storageBlockReport" + interval + "s",
"Storage block report", "ops", "latency", interval);
cacheReportQuantiles[i] = registry.newQuantiles(
"cacheReport" + interval + "s",
"Cache report", "ops", "latency", interval);
generateEDEKTimeQuantiles[i] = registry.newQuantiles(
"generateEDEKTime" + interval + "s",
"Generate EDEK time", "ops", "latency", interval);
warmUpEDEKTimeQuantiles[i] = registry.newQuantiles(
"warmupEDEKTime" + interval + "s",
"Warm up EDEK time", "ops", "latency", interval);
resourceCheckTimeQuantiles[i] = registry.newQuantiles(
"resourceCheckTime" + interval + "s",
"resource check time", "ops", "latency", interval);
editLogTailTimeQuantiles[i] = registry.newQuantiles(
"editLogTailTime" + interval + "s",
"Edit log tailing time", "ops", "latency", interval);
editLogFetchTimeQuantiles[i] = registry.newQuantiles(
"editLogFetchTime" + interval + "s",
"Edit log fetch time", "ops", "latency", interval);
numEditLogLoadedQuantiles[i] = registry.newQuantiles(
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
"packetAckRoundTripTimeNanos" + interval + "s",
"Packet Ack RTT in ns", "ops", "latency", interval);
flushNanosQuantiles[i] = registry.newQuantiles(
"flushNanos" + interval + "s",
"Disk flush latency in ns", "ops", "latency", interval);
fsyncNanosQuantiles[i] = registry.newQuantiles(
"fsyncNanos" + interval + "s", "Disk fsync latency in ns",
"ops", "latency", interval);
sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.newQuantiles(
"sendDataPacketBlockedOnNetworkNanos" + interval + "s",
"Time blocked on network while sending a packet in ns",
"ops", "latency", interval);
sendDataPacketTransferNanosQuantiles[i] = registry.newQuantiles(
"sendDataPacketTransferNanos" + interval + "s",
"Time reading from disk and writing to network while sending " +
"a packet in ns", "ops", "latency", interval);
ramDiskBlocksEvictionWindowMsQuantiles[i] = registry.newQuantiles(
"ramDiskBlocksEvictionWindows" + interval + "s",
"Time between the RamDisk block write and eviction in ms",
"ops", "latency", interval);
ramDiskBlocksLazyPersistWindowMsQuantiles[i] = registry.newQuantiles(
"ramDiskBlocksLazyPersistWindows" + interval + "s",
"Time between the RamDisk block write and disk persist in ms",
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Reads the percentile windows from configuration and, when present,
 * registers end-to-end and RPC write-latency quantiles per window.
 */
private IPCLoggerChannelMetrics(IPCLoggerChannel ch) {
  this.ch = ch;
  Configuration conf = new HdfsConfiguration();
  int[] intervals =
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  if (intervals == null) {
    // No percentile windows configured: skip quantile tracking entirely.
    writeEndToEndLatencyQuantiles = null;
    writeRpcLatencyQuantiles = null;
    return;
  }
  final int numIntervals = intervals.length;
  writeEndToEndLatencyQuantiles = new MutableQuantiles[numIntervals];
  writeRpcLatencyQuantiles = new MutableQuantiles[numIntervals];
  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    writeEndToEndLatencyQuantiles[idx] = registry.newQuantiles(
        "writesE2E" + window + "s",
        "End-to-end time for write operations", "ops", "LatencyMicros", window);
    writeRpcLatencyQuantiles[idx] = registry.newQuantiles(
        "writesRpc" + window + "s",
        "RPC RTT for write operations", "ops", "LatencyMicros", window);
  }
}
代码示例来源:origin: apache/accumulo
/**
 * Sets up the replication metrics registry, tagging it with the current
 * process name, and registers queue-time quantiles (600 s rollover
 * window) plus a queue-time stat.
 */
Metrics2ReplicationMetrics(Master master, MetricsSystem system) {
  this.registry = new MetricsRegistry(Interns.info(NAME, DESCRIPTION));
  this.registry.tag(MsInfo.ProcessName, MetricsSystemHelper.getProcessName());
  this.master = master;
  this.system = system;
  pathModTimes = new HashMap<>();
  replicationUtil = new ReplicationUtil(master.getContext());
  replicationQueueTimeQuantiles = registry.newQuantiles(REPLICATION_QUEUE_TIME_QUANTILES,
      "Replication queue time quantiles in milliseconds", "ops", "latency", 600);
  replicationQueueTimeStat = registry.newStat(REPLICATION_QUEUE_TIME,
      "Replication queue time statistics in milliseconds", "ops", "latency", true);
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
JournalMetrics(Journal journal) {
this.journal = journal;
syncsQuantiles = new MutableQuantiles[QUANTILE_INTERVALS.length];
for (int i = 0; i < syncsQuantiles.length; i++) {
int interval = QUANTILE_INTERVALS[i];
syncsQuantiles[i] = registry.newQuantiles(
"syncs" + interval + "s",
"Journal sync time", "ops", "latencyMicros", interval);
}
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
JournalMetrics(Journal journal) {
this.journal = journal;
syncsQuantiles = new MutableQuantiles[QUANTILE_INTERVALS.length];
for (int i = 0; i < syncsQuantiles.length; i++) {
int interval = QUANTILE_INTERVALS[i];
syncsQuantiles[i] = registry.newQuantiles(
"syncs" + interval + "s",
"Journal sync time", "ops", "latencyMicros", interval);
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-yarn-server-router
private RouterMetrics() {
registry = new MetricsRegistry(RECORD_INFO);
registry.tag(RECORD_INFO, "Router");
getNewApplicationLatency = registry.newQuantiles("getNewApplicationLatency",
"latency of get new application", "ops", "latency", 10);
submitApplicationLatency = registry.newQuantiles("submitApplicationLatency",
"latency of submit application", "ops", "latency", 10);
killApplicationLatency = registry.newQuantiles("killApplicationLatency",
"latency of kill application", "ops", "latency", 10);
getApplicationReportLatency =
registry.newQuantiles("getApplicationReportLatency",
"latency of get application report", "ops", "latency", 10);
getApplicationsReportLatency =
registry.newQuantiles("getApplicationsReportLatency",
"latency of get applications report", "ops", "latency", 10);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Tags the registry with process/session identity and registers sync,
 * block-report and cache-report latency quantiles, one of each per
 * requested interval.
 */
NameNodeMetrics(String processName, String sessionId, int[] intervals,
    final JvmMetrics jvmMetrics) {
  this.jvmMetrics = jvmMetrics;
  registry.tag(ProcessName, processName).tag(SessionId, sessionId);
  final int numIntervals = intervals.length;
  syncsQuantiles = new MutableQuantiles[numIntervals];
  blockReportQuantiles = new MutableQuantiles[numIntervals];
  cacheReportQuantiles = new MutableQuantiles[numIntervals];
  for (int idx = 0; idx < numIntervals; idx++) {
    final int window = intervals[idx];
    syncsQuantiles[idx] = registry.newQuantiles(
        "syncs" + window + "s", "Journal syncs", "ops", "latency", window);
    blockReportQuantiles[idx] = registry.newQuantiles(
        "blockReport" + window + "s", "Block report", "ops", "latency", window);
    cacheReportQuantiles[idx] = registry.newQuantiles(
        "cacheReport" + window + "s", "Cache report", "ops", "latency", window);
  }
}
代码示例来源:origin: org.apache.hadoop/hadoop-aws
/**
 * Registers a quantiles metric for the given statistic, using the
 * statistic's symbol as the metric name and its description text.
 *
 * @param op statistic to collect
 * @param sampleName sample name of the quantiles
 * @param valueName value name of the quantiles
 * @param interval rollover interval of the quantiles in seconds
 * @return the created quantiles metric
 */
protected final MutableQuantiles quantiles(Statistic op,
    String sampleName,
    String valueName,
    int interval) {
  final String metricName = op.getSymbol();
  final String description = op.getDescription();
  return registry.newQuantiles(metricName, description, sampleName, valueName, interval);
}
代码示例来源:origin: org.apache.accumulo/accumulo-master
/**
 * Builds the replication metrics registry (tagged with the process
 * name) and registers a 600-second queue-time quantile plus a
 * queue-time stat.
 */
Metrics2ReplicationMetrics(Master master, MetricsSystem system) {
  this.registry = new MetricsRegistry(Interns.info(NAME, DESCRIPTION));
  this.registry.tag(MsInfo.ProcessName, MetricsSystemHelper.getProcessName());
  this.master = master;
  this.system = system;
  pathModTimes = new HashMap<>();
  replicationUtil = new ReplicationUtil(master);
  replicationQueueTimeQuantiles = registry.newQuantiles(REPLICATION_QUEUE_TIME_QUANTILES,
      "Replication queue time quantiles in milliseconds", "ops", "latency", 600);
  replicationQueueTimeStat = registry.newStat(REPLICATION_QUEUE_TIME,
      "Replication queue time statistics in milliseconds", "ops", "latency", true);
}
代码示例来源:origin: org.apache.hive/hive-llap-server
/**
 * Initializes the LLAP daemon IO metrics registry and one
 * decoding-time percentile per interval; a null interval array yields
 * no percentile metrics.
 */
private LlapDaemonIOMetrics(String displayName, String sessionId, int[] intervals) {
  this.name = displayName;
  this.sessionId = sessionId;
  this.registry = new MetricsRegistry("LlapDaemonIORegistry");
  this.registry.tag(ProcessName, MetricsUtils.METRICS_PROCESS_NAME).tag(SessionId, sessionId);
  final int count = (intervals == null) ? 0 : intervals.length;
  this.decodingTimes = new MutableQuantiles[count];
  for (int pos = 0; pos < count; pos++) {
    final int secs = intervals[pos];
    LOG.info("Created interval " + LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + secs + "s");
    decodingTimes[pos] = registry.newQuantiles(
        LlapDaemonIOInfo.PercentileDecodingTime.name() + "_" + secs + "s",
        LlapDaemonIOInfo.PercentileDecodingTime.description(),
        "ops", "latency", secs);
  }
}
内容来源于网络,如有侵权,请联系作者删除!