Usage of org.apache.hadoop.yarn.api.records.Resource.toString() with code examples

This article collects Java code examples for the org.apache.hadoop.yarn.api.records.Resource.toString method and shows how it is used in practice. The examples are drawn from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected projects, so they should serve as useful references. Details of the Resource.toString method:
Package: org.apache.hadoop.yarn.api.records
Class: Resource
Method: toString

About Resource.toString

The upstream Javadoc provides no description for this method. As the test cases below show, it returns a human-readable summary of the resource, e.g. <memory:10, vCores:1>.
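
To make the output concrete, here is a minimal, self-contained sketch (not taken from any of the projects below; the 1024 MB / 2 vCores values are purely illustrative) that builds a Resource via the standard Resource.newInstance factory and prints its string form:

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceToStringDemo {
 public static void main(String[] args) {
  // Build a resource of 1024 MB memory and 2 virtual cores.
  Resource resource = Resource.newInstance(1024, 2);
  // Prints something like: <memory:1024, vCores:2>
  System.out.println(resource.toString());
 }
}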

Code Examples

Code example source: origin: Qihoo360/XLearning

@Override
public void onContainersAllocated(List<Container> containers) {
 for (Container acquiredContainer : containers) {
  LOG.info("Acquired container " + acquiredContainer.getId()
    + " on host " + acquiredContainer.getNodeId().getHost()
    + " , with the resource " + acquiredContainer.getResource().toString());
  String host = acquiredContainer.getNodeId().getHost();
  if (!blackHosts.contains(host)) {
   if (workerContainersAllocating.get()) {
    acquiredWorkerContainers.add(acquiredContainer);
    acquiredWorkerContainersCount.incrementAndGet();
   } else {
    acquiredPsContainers.add(acquiredContainer);
    acquiredPsContainersCount.incrementAndGet();
   }
  } else {
   LOG.info("Add container " + acquiredContainer.getId() + " to cancel list");
   cancelContainers.add(acquiredContainer);
  }
 }
 LOG.info("Current acquired worker container " + acquiredWorkerContainersCount.get()
   + " / " + neededWorkerContainersCount + " ps container " + acquiredPsContainersCount.get()
   + " / " + neededPsContainersCount);
}

Code example source: origin: alibaba/jstorm

private static void publishContainerStartEvent(
    final TimelineClient timelineClient, Container container, String domainId,
    UserGroupInformation ugi) {
  final TimelineEntity entity = new TimelineEntity();
  entity.setEntityId(container.getId().toString());
  entity.setEntityType(DSEntity.DS_CONTAINER.toString());
  entity.setDomainId(domainId);
  entity.addPrimaryFilter(JOYConstants.USER, ugi.getShortUserName());
  TimelineEvent event = new TimelineEvent();
  event.setTimestamp(System.currentTimeMillis());
  event.setEventType(DSEvent.DS_CONTAINER_START.toString());
  event.addEventInfo(JOYConstants.NODE, container.getNodeId().toString());
  event.addEventInfo(JOYConstants.RESOURCES, container.getResource().toString());
  entity.addEvent(event);
  try {
    ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
      @Override
      public TimelinePutResponse run() throws Exception {
        return timelineClient.putEntities(entity);
      }
    });
  } catch (Exception e) {
    LOG.error("Container start event could not be published for "
            + container.getId().toString(),
        e instanceof UndeclaredThrowableException ? e.getCause() : e);
  }
}

Code example source: origin: apache/metron

public void publishContainerStartEvent(
     final TimelineClient timelineClient, Container container, String domainId,
     UserGroupInformation ugi) {
  final TimelineEntity entity = new TimelineEntity();
  entity.setEntityId("" + container.getId());
  entity.setEntityType(ApplicationMaster.DSEntity.DS_CONTAINER.toString());
  entity.setDomainId(domainId);
  entity.addPrimaryFilter("user", ugi.getShortUserName());
  TimelineEvent event = new TimelineEvent();
  event.setTimestamp(System.currentTimeMillis());
  event.setEventType(ContainerEvents.CONTAINER_START.toString());
  event.addEventInfo("Node", container.getNodeId().toString());
  event.addEventInfo("Resources", container.getResource().toString());
  entity.addEvent(event);

  try {
   ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() {
    @Override
    public TimelinePutResponse run() throws Exception {
     return timelineClient.putEntities(entity);
    }
   });
  } catch (Exception e) {
   LOG.error("Container start event could not be published for "
           + container.getId().toString(),
       e instanceof UndeclaredThrowableException ? e.getCause() : e);
  }
 }
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Override
public String toString() {
 return getResource().toString();
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-api

@Override
public String toString() {
 return "Resource:" + getResource().toString() 
   + ", overCommitTimeout:" + getOverCommitTimeout();
}

Code example source: origin: ch.cern.hadoop/hadoop-yarn-api

@Override
public String toString() {
 return "Resource:" + getResource().toString() 
   + ", overCommitTimeout:" + getOverCommitTimeout();
}

Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-api

@Override
public String toString() {
 return "Resource:" + getResource().toString() 
   + ", overCommitTimeout:" + getOverCommitTimeout();
}

Code example source: origin: io.hops/hadoop-yarn-api

@Override
public String toString() {
 return "Resource:" + getResource().toString() 
   + ", overCommitTimeout:" + getOverCommitTimeout();
}

Code example source: origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

/**
 * Returns the JSON string representation of the current resources allocated
 * over time
 * 
 * @return the JSON string representation of the current resources allocated
 *         over time
 */
public String toMemJSONString() {
 StringWriter json = new StringWriter();
 JsonWriter jsonWriter = new JsonWriter(json);
 readLock.lock();
 try {
  jsonWriter.beginObject();
  // jsonWriter.name("timestamp").value("resource");
  for (Map.Entry<Long, Resource> r : cumulativeCapacity.entrySet()) {
   jsonWriter.name(r.getKey().toString()).value(r.getValue().toString());
  }
  jsonWriter.endObject();
  jsonWriter.close();
  return json.toString();
 } catch (IOException e) {
  // This should not happen
  return "";
 } finally {
  readLock.unlock();
 }
}

Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

/**
 * Returns the JSON string representation of the current resources allocated
 * over time
 * 
 * @return the JSON string representation of the current resources allocated
 *         over time
 */
public String toMemJSONString() {
 StringWriter json = new StringWriter();
 JsonWriter jsonWriter = new JsonWriter(json);
 readLock.lock();
 try {
  jsonWriter.beginObject();
  // jsonWriter.name("timestamp").value("resource");
  for (Map.Entry<Long, Resource> r : cumulativeCapacity.entrySet()) {
   jsonWriter.name(r.getKey().toString()).value(r.getValue().toString());
  }
  jsonWriter.endObject();
  jsonWriter.close();
  return json.toString();
 } catch (IOException e) {
  // This should not happen
  return "";
 } finally {
  readLock.unlock();
 }
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Override
public String toString() {
 String ret = "resource:" + resource.toString();
 ret += "; type:" + type;
 ret += "; excessReservation:" + excessReservation;
 ret +=
   "; applicationid:"
     + (application != null ? application.getApplicationId().toString()
       : "null");
 ret += "; skipped:" + skipped;
 ret += "; fulfilled reservation:" + fulfilledReservation;
 ret +=
   "; allocations(count/resource):"
     + assignmentInformation.getNumAllocations() + "/"
     + assignmentInformation.getAllocated().toString();
 ret +=
   "; reservations(count/resource):"
     + assignmentInformation.getNumReservations() + "/"
     + assignmentInformation.getReserved().toString();
 return ret;
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-common

/**
 * Test that the compare() method returns the expected result (0, -1, or 1).
 * If the expected result is not 0, this method will also test the resources
 * in the opposite order and check for the negative of the expected result.
 *
 * @param cluster the cluster resource
 * @param res1 the LHS resource
 * @param res2 the RHS resource
 * @param expected the expected result
 */
private void assertComparison(Resource cluster, Resource res1, Resource res2,
  int expected) {
 int actual = resourceCalculator.compare(cluster, res1, res2);
 assertEquals(String.format("Resource comparison did not give the expected "
   + "result for %s v/s %s", res1.toString(), res2.toString()),
   expected, actual);
 if (expected != 0) {
  // Try again with args in the opposite order and the negative of the
  // expected result.
  actual = resourceCalculator.compare(cluster, res2, res1);
  assertEquals(String.format("Resource comparison did not give the "
    + "expected result for %s v/s %s", res2.toString(), res1.toString()),
    expected * -1, actual);
 }
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Before
public void setUp() throws Exception {
 when(APPID.toString()).thenReturn("app_1");
 when(ATTEMPTID.toString()).thenReturn("app_attempt_1");
 when(CONTAINERID.toString()).thenReturn("container_1");
 when(RESOURCE.toString()).thenReturn("<memory:1536, vcores:1>");
}

Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

@Test
 public void testResourceInfo() {
  Resource res = Resources.createResource(10, 1);
  // If we add a new resource (e.g disks), then
  // CapacitySchedulerPage and these RM WebServices + docs need to be updated
  // eg. ResourceInfo
  assertEquals("<memory:10, vCores:1>", res.toString());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Test
 public void testResourceInfo() {
  Resource res = Resources.createResource(10, 1);
  // If we add a new resource (e.g disks), then
  // CapacitySchedulerPage and these RM WebServices + docs need to be updated
  // eg. ResourceInfo
  assertEquals("<memory:10, vCores:1>", res.toString());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

/**
 * A helper api for creating an audit log for a failure event.
 */
static String createFailureLog(String user, String operation, String perm,
  String target, String description, ApplicationId appId,
  ApplicationAttemptId attemptId, ContainerId containerId,
  Resource resource, CallerContext callerContext) {
 StringBuilder b = createStringBuilderForFailureLog(user,
   operation, target, description, perm);
 if (appId != null) {
  add(Keys.APPID, appId.toString(), b);
 }
 if (attemptId != null) {
  add(Keys.APPATTEMPTID, attemptId.toString(), b);
 }
 if (containerId != null) {
  add(Keys.CONTAINERID, containerId.toString(), b);
 }
 if (resource != null) {
  add(Keys.RESOURCE, resource.toString(), b);
 }
 appendCallerContext(b, callerContext);
 return b.toString();
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

/**
 * A helper api for creating an audit log for a successful event.
 */
static String createSuccessLog(String user, String operation, String target,
  ApplicationId appId, ApplicationAttemptId attemptId,
  ContainerId containerId, Resource resource, CallerContext callerContext,
  InetAddress ip) {
 StringBuilder b =
   createStringBuilderForSuccessEvent(user, operation, target, ip);
 if (appId != null) {
  add(Keys.APPID, appId.toString(), b);
 }
 if (attemptId != null) {
  add(Keys.APPATTEMPTID, attemptId.toString(), b);
 }
 if (containerId != null) {
  add(Keys.CONTAINERID, containerId.toString(), b);
 }
 if (resource != null) {
  add(Keys.RESOURCE, resource.toString(), b);
 }
 appendCallerContext(b, callerContext);
 return b.toString();
}

Code example source: origin: org.apache.hadoop/hadoop-yarn-applications-distributedshell

@Override
public void onContainersUpdated(
  List<UpdatedContainer> containers) {
 for (UpdatedContainer container : containers) {
  LOG.info("Container {} updated, updateType={}, resource={}, "
      + "execType={}",
    container.getContainer().getId(),
    container.getUpdateType().toString(),
    container.getContainer().getResource().toString(),
    container.getContainer().getExecutionType());
  // TODO Remove this line with finalized updateContainer API.
  // Currently nm client needs to notify the NM to update container
  // execution type via NMClient#updateContainerResource() or
  // NMClientAsync#updateContainerResourceAsync() when
  // auto-update.containers is disabled, but this API is
  // under evolving and will need to be replaced by a proper new API.
  nmClientAsync.updateContainerResourceAsync(container.getContainer());
 }
}

Code example source: origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

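// Fragment, likely from a ResourceManager web UI page built with the Hamlet HTML
// builder: it writes the active-NodeManager count and the resource's string form
// into adjacent table cells.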
row = row.td(String.valueOf(nActiveNMs));
row.td(info.getResource().toString())._();

Code example source: origin: org.apache.hadoop/hadoop-yarn-applications-distributedshell

private void publishContainerStartEvent(
  final TimelineClient timelineClient, final Container container,
  String domainId, UserGroupInformation ugi) {
 final TimelineEntity entity = new TimelineEntity();
 entity.setEntityId(container.getId().toString());
 entity.setEntityType(DSEntity.DS_CONTAINER.toString());
 entity.setDomainId(domainId);
 entity.addPrimaryFilter(USER_TIMELINE_FILTER_NAME, ugi.getShortUserName());
 entity.addPrimaryFilter(APPID_TIMELINE_FILTER_NAME, container.getId()
   .getApplicationAttemptId().getApplicationId().toString());
 TimelineEvent event = new TimelineEvent();
 event.setTimestamp(System.currentTimeMillis());
 event.setEventType(DSEvent.DS_CONTAINER_START.toString());
 event.addEventInfo("Node", container.getNodeId().toString());
 event.addEventInfo("Resources", container.getResource().toString());
 entity.addEvent(event);
 try {
  processTimelineResponseErrors(
    putContainerEntity(timelineClient,
      container.getId().getApplicationAttemptId(),
      entity));
 } catch (YarnException | IOException | ClientHandlerException e) {
  LOG.error("Container start event could not be published for "
    + container.getId().toString(), e);
 }
}
