本文整理了Java中org.mortbay.util.ajax.JSON.toString()
方法的一些代码示例,展示了JSON.toString()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。JSON.toString()
方法的具体详情如下:
包路径:org.mortbay.util.ajax.JSON
类名称:JSON
方法名:toString
暂无
代码示例来源:origin: org.mortbay.jetty/jetty-util
return toString(scratch,0,i);
return toString(scratch,0,i);
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Serializes the datanode's per-volume attributes as JSON.
 *
 * @return a JSON object keyed by volume name, where each value is a map
 *         of volume attribute names to their current values
 * @throws NullPointerException if storage has not been initialized yet
 */
@Override // DataNodeMXBean
public String getVolumeInfo() {
  Preconditions.checkNotNull(data, "Storage not yet initialized");
  return JSON.toString(data.getVolumeInfoMap());
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Reports snapshot statistics as a JSON object with two entries:
 * "SnapshottableDirectories" and "Snapshots".
 */
@Override
public String getSnapshotStats() {
  final Map<String, Object> stats = new HashMap<String, Object>();
  stats.put("SnapshottableDirectories", getNumSnapshottableDirs());
  stats.put("Snapshots", getNumSnapshots());
  return JSON.toString(stats);
}
代码示例来源:origin: io.fabric8/fabric-hadoop
/**
 * Builds a JSON report of every storage volume managed by this datanode.
 *
 * @return a JSON object keyed by volume directory; each value is a map
 *         with the volume's "usedSpace", "freeSpace" and "reservedSpace"
 */
@Override // DataNodeMXBean
public String getVolumeInfo() {
  final Map<String, Object> report = new HashMap<String, Object>();
  for (VolumeInfo vol : ((FSDataset) this.data).getVolumeInfo()) {
    final Map<String, Object> attrs = new HashMap<String, Object>();
    attrs.put("usedSpace", vol.usedSpace);
    attrs.put("freeSpace", vol.freeSpace);
    attrs.put("reservedSpace", vol.reservedSpace);
    report.put(vol.directory, attrs);
  }
  return JSON.toString(report);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Lists the currently known corrupt file blocks as a JSON array of their
 * string descriptions. On lookup failure the error is logged and an empty
 * JSON array is returned.
 */
@Override // NameNodeMXBean
public String getCorruptFiles() {
  final List<String> corrupt = new ArrayList<String>();
  try {
    // Iterating an empty collection is a no-op, so no explicit size check
    // is needed before the loop.
    for (FSNamesystem.CorruptFileBlockInfo info : listCorruptFileBlocks("/", null)) {
      corrupt.add(info.toString());
    }
  } catch (IOException e) {
    LOG.warn("Get corrupt file blocks returned error: " + e.getMessage());
  }
  return JSON.toString(corrupt);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Exposes edit-log transaction progress as JSON: the last applied/written
 * transaction id and the transaction id of the most recent checkpoint.
 */
@Override // NameNodeMXBean
public String getJournalTransactionInfo() {
  final Map<String, String> txnInfo = new HashMap<String, String>();
  txnInfo.put("LastAppliedOrWrittenTxId",
      Long.toString(getFSImage().getLastAppliedOrWrittenTxId()));
  txnInfo.put("MostRecentCheckpointTxId",
      Long.toString(getFSImage().getMostRecentCheckpointTxId()));
  return JSON.toString(txnInfo);
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Renders these counters as a JSON object: group display name maps to an
 * object of counter display names and their long values.
 */
public synchronized String makeJsonString() {
  final Map<String, Map<String, Long>> countersByGroup =
      new HashMap<String, Map<String, Long>>();
  for (Group group : this) {
    final Map<String, Long> values = new HashMap<String, Long>();
    for (Counter counter : group) {
      values.put(counter.getDisplayName(), counter.getCounter());
    }
    countersByGroup.put(group.getDisplayName(), values);
  }
  return JSON.toString(countersByGroup);
}
代码示例来源:origin: org.mortbay.jetty/cometd-jetty
/**
 * Round-trips the generator's JSON through a parse so the filter operates
 * on plain parsed data rather than on the generator itself.
 */
protected Object filterJSON(Client from, Channel to, JSON.Generator generator)
{
    Object parsed = JSON.parse(JSON.toString(generator));
    return filter(from, to, parsed);
}
代码示例来源:origin: org.mortbay.jetty/cometd-server
/**
 * Atomically installs new advice: bumps the advice version, stores the
 * literal, and caches a pre-rendered multi-frame variant of it.
 */
public void setAdvice(JSON.Literal advice)
{
    synchronized (this)
    {
        _adviceVersion++;
        _advice = advice;
        String multiFrame = JSON.toString(multiFrameAdvice(advice));
        _multiFrameAdvice = new JSON.Literal(multiFrame);
    }
}
代码示例来源:origin: org.mortbay.jetty/cometd-jetty
/**
 * Replaces the current advice under the instance lock. The version counter
 * is incremented so readers can detect the change, and the multi-frame
 * rendering is recomputed eagerly.
 */
public void setAdvice(JSON.Literal advice)
{
    synchronized (this)
    {
        _adviceVersion++;
        _advice = advice;
        JSON.Literal rendered =
            new JSON.Literal(JSON.toString(multiFrameAdvice(advice)));
        _multiFrameAdvice = rendered;
    }
}
代码示例来源:origin: org.mortbay.jetty/cometd-server
/**
 * Serializes the generator to JSON text, re-parses it into plain objects,
 * and hands the result to the channel filter.
 */
protected Object filterJSON(Client from, Channel to, JSON.Generator generator)
{
    String json = JSON.toString(generator);
    Object data = JSON.parse(json);
    return filter(from, to, data);
}
代码示例来源:origin: io.hops/hadoop-yarn-server-common
/**
 * Method invoked by a JMX client to get the state of the
 * CertificateLocalization service (shown under the attributes tab).
 *
 * @return a JSON map whose materialized-key entry holds, per username,
 *         the number of applications referencing that material
 */
@Override
public String getState() {
  // Snapshot the material map under the lock so serialization below can
  // run without holding it.
  ImmutableMap<StorageKey, CryptoMaterial> snapshot;
  lock.lock();
  try {
    snapshot = ImmutableMap.copyOf(materialLocation);
  } finally {
    lock.unlock();
  }

  ReturnState<String, Integer> refCounts = new ReturnState<>();
  for (Map.Entry<StorageKey, CryptoMaterial> e : snapshot.entrySet()) {
    refCounts.put(e.getKey().username, e.getValue().getRequestedApplications());
  }

  ReturnState<String, String> jmxView = new ReturnState<>();
  jmxView.put(JMX_MATERIALIZED_KEY, JSON.toString(refCounts));
  return JSON.toString(jmxView);
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Returned information is a JSON representation of a map with
 * name node host name as the key and namespace id as the value.
 * Only namespaces that are fully initialized are included.
 */
@Override // DataNodeMXBean
public String getNamenodeAddresses() {
  final Map<String, Integer> addresses = new HashMap<String, Integer>();
  for (NamespaceService service : namespaceManager.getAllNamenodeThreads()) {
    if (service == null || !service.initialized()) {
      continue;
    }
    addresses.put(service.getNNSocketAddress().getHostName(),
        service.getNamespaceId());
  }
  return JSON.toString(addresses);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Returned information is a JSON representation of a map with host name as
 * the key and value is a map of dead node attribute keys ("lastContact",
 * "decommissioned", "xferaddr") to its values.
 */
@Override // NameNodeMXBean
public String getDeadNodes() {
  final List<DatanodeDescriptor> deadNodes = new ArrayList<DatanodeDescriptor>();
  blockManager.getDatanodeManager().fetchDatanodes(null, deadNodes, false);

  final Map<String, Map<String, Object>> deadNodeInfo =
      new HashMap<String, Map<String, Object>>();
  for (DatanodeDescriptor dn : deadNodes) {
    // ImmutableMap preserves insertion order of the attributes.
    Map<String, Object> attrs = ImmutableMap.<String, Object>builder()
        .put("lastContact", getLastContact(dn))
        .put("decommissioned", dn.isDecommissioned())
        .put("xferaddr", dn.getXferAddr())
        .build();
    deadNodeInfo.put(dn.getHostName() + ":" + dn.getXferPort(), attrs);
  }
  return JSON.toString(deadNodeInfo);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Returned information is a JSON representation of a map with
 * name node host name as the key and block pool Id as the value.
 * Note that, if there are multiple NNs in an NA nameservice,
 * a given block pool may be represented twice.
 */
@Override // DataNodeMXBean
public String getNamenodeAddresses() {
  final Map<String, String> addresses = new HashMap<String, String>();
  for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
    if (bpos == null) {
      continue;
    }
    String blockPoolId = bpos.getBlockPoolId();
    for (BPServiceActor actor : bpos.getBPServiceActors()) {
      addresses.put(actor.getNNSocketAddress().getHostName(), blockPoolId);
    }
  }
  return JSON.toString(addresses);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-common
/**
 * Echoes the request path and operation back to the caller as a JSON
 * object with "path" and "op" entries (TreeMap gives deterministic
 * key order in the response).
 */
@GET
@Path("{" + PATH + ":.*}")
@Produces({MediaType.APPLICATION_JSON})
public Response get(
    @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
    @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
    ) throws IOException {
  LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);

  final Map<String, Object> response = new TreeMap<String, Object>();
  response.put(PATH, path);
  response.put(OP, op);

  final String json = JSON.toString(response);
  return Response.ok(json).type(MediaType.APPLICATION_JSON).build();
}
}
代码示例来源:origin: com.github.jiayuhan-it/hadoop-common
/**
 * GET handler that reflects the requested path and op back as JSON.
 * A TreeMap is used so the serialized keys appear in sorted order.
 */
@GET
@Path("{" + PATH + ":.*}")
@Produces({MediaType.APPLICATION_JSON})
public Response get(
    @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
    @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op
    ) throws IOException {
  LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op);

  final Map<String, Object> body = new TreeMap<String, Object>();
  body.put(PATH, path);
  body.put(OP, op);

  return Response.ok(JSON.toString(body))
      .type(MediaType.APPLICATION_JSON)
      .build();
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-common-test
/** Verifies that a TagsMetricsPair serializes to the expected JSON array. */
public void testTagsMetricsPair() throws IOException {
  TagsMetricsPair tagsAndMetrics = new TagsMetricsPair(
      outputRecord.getTagsCopy(), outputRecord.getMetricsCopy());
  String json = JSON.toString(tagsAndMetrics);
  assertEquals(
      "[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
      "{\"testMetric1\":1,\"testMetric2\":33}]", json);
}
代码示例来源:origin: com.github.jiayuhan-it/hadoop-common
/** Checks JSON serialization of a tags/metrics pair against a literal. */
public void testTagsMetricsPair() throws IOException {
  final TagsMetricsPair pair =
      new TagsMetricsPair(outputRecord.getTagsCopy(),
                          outputRecord.getMetricsCopy());
  final String actual = JSON.toString(pair);
  assertEquals(
      "[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
      "{\"testMetric1\":1,\"testMetric2\":33}]", actual);
}
代码示例来源:origin: ch.cern.hadoop/hadoop-common
/** Asserts the exact JSON form produced for a sample TagsMetricsPair. */
public void testTagsMetricsPair() throws IOException {
  String serialized = JSON.toString(
      new TagsMetricsPair(outputRecord.getTagsCopy(),
                          outputRecord.getMetricsCopy()));
  assertEquals(
      "[{\"testTag1\":\"testTagValue1\",\"testTag2\":\"testTagValue2\"},"+
      "{\"testMetric1\":1,\"testMetric2\":33}]", serialized);
}
内容来源于网络,如有侵权,请联系作者删除!