Usage of the org.apache.hadoop.yarn.api.records.Resource.newInstance() method, with code examples


This article collects Java code examples of the org.apache.hadoop.yarn.api.records.Resource.newInstance method and shows how Resource.newInstance is used in practice. The examples are mainly drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Resource.newInstance method:
Package path: org.apache.hadoop.yarn.api.records
Class name: Resource
Method name: newInstance

Introduction to Resource.newInstance

Create a new Resource instance with the given CPU and memory values, plus additional resource values as set in the others parameter. Note that the CPU and memory settings in the others parameter will be ignored.
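
The description above covers both the basic two-argument form and the overload that accepts additional resource types. Below is a minimal sketch of both calls; it assumes a Hadoop 3.x client where the overload taking a Map<String, Long> of extended resources is available, and the "yarn.io/gpu" resource name is purely illustrative (it would have to be configured as a resource type on the cluster).

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceNewInstanceSketch {
 public static void main(String[] args) {
  // Basic form: 2048 MB of memory and 4 virtual cores.
  Resource basic = Resource.newInstance(2048, 4);

  // Overload with extended resources (assumed Hadoop 3.x signature):
  // per the Javadoc above, memory/vcores entries in this map are ignored.
  Map<String, Long> others = new HashMap<>();
  others.put("yarn.io/gpu", 2L); // hypothetical resource type; must exist in the cluster config
  Resource withGpu = Resource.newInstance(4096, 8, others);

  System.out.println(basic);  // prints something like <memory:2048, vCores:4>
  System.out.println(withGpu);
 }
}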

Code examples

Code example source: apache/hive

@Override
public Resource getResource() {
 return Resource.newInstance(memory, vcores);
}

Code example source: apache/flink

private Resource getContainerResource() {
  final long mem = taskManagerParameters.taskManagerTotalMemoryMB();
  final int containerMemorySizeMB;
  if (mem <= Integer.MAX_VALUE) {
    containerMemorySizeMB = (int) mem;
  } else {
    containerMemorySizeMB = Integer.MAX_VALUE;
    LOG.error("Decreasing container size from {} MB to {} MB (integer value overflow)",
      mem, containerMemorySizeMB);
  }
  // Resource requirements for worker containers
  int taskManagerSlots = taskManagerParameters.numSlots();
  int vcores = config.getInteger(YarnConfigOptions.VCORES, Math.max(taskManagerSlots, 1));
  return Resource.newInstance(containerMemorySizeMB, vcores);
}

Code example source: alibaba/jstorm

/**
 * Setup the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
public ContainerRequest setupContainerAskForRM(int containerMemory, int containerVirtualCores, int priority, String[] racks, String[] hosts) {
  Priority pri = Priority.newInstance(priority);
  Resource capability = Resource.newInstance(containerMemory,
      containerVirtualCores);
  ContainerRequest request = new ContainerRequest(capability, hosts, racks,
      pri, false);
  LOG.info("By Thrift Server Requested container  ask: " + request.toString());
  return request;
}

Code example source: apache/hive

public static Resource getContainerResource(Configuration conf) {
 int memory = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0 ?
  HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) :
  conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB);
 int cpus = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES) > 0 ?
  HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES) :
  conf.getInt(MRJobConfig.MAP_CPU_VCORES, MRJobConfig.DEFAULT_MAP_CPU_VCORES);
 return Resource.newInstance(memory, cpus);
}

Code example source: apache/incubator-gobblin

private Resource prepareContainerResource(GetNewApplicationResponse newApplicationResponse) {
 int memoryMbs = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_MEMORY_MBS_KEY);
 int maximumMemoryCapacity = newApplicationResponse.getMaximumResourceCapability().getMemory();
 if (memoryMbs > maximumMemoryCapacity) {
  LOGGER.info(String.format("Specified AM memory [%d] is above the maximum memory capacity [%d] of the "
    + "cluster, using the maximum memory capacity instead.", memoryMbs, maximumMemoryCapacity));
  memoryMbs = maximumMemoryCapacity;
 }
 int vCores = this.config.getInt(GobblinYarnConfigurationKeys.APP_MASTER_CORES_KEY);
 int maximumVirtualCoreCapacity = newApplicationResponse.getMaximumResourceCapability().getVirtualCores();
 if (vCores > maximumVirtualCoreCapacity) {
  LOGGER.info(String.format("Specified AM vcores [%d] is above the maximum vcore capacity [%d] of the "
    + "cluster, using the maximum vcore capacity instead.", memoryMbs, maximumMemoryCapacity));
  vCores = maximumVirtualCoreCapacity;
 }
 // Set up resource type requirements for ApplicationMaster
 return Resource.newInstance(memoryMbs, vCores);
}

Code example source: apache/drill

public static Resource getContainerResource(Configuration conf) {
 int memory = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0 ?
  HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) :
  conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB);
 int cpus = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES) > 0 ?
  HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES) :
  conf.getInt(MRJobConfig.MAP_CPU_VCORES, MRJobConfig.DEFAULT_MAP_CPU_VCORES);
 return Resource.newInstance(memory, cpus);
}

Code example source: apache/hive

@Override
public Resource getTotalResources() {
 int memory = 0;
 int vcores = 0;
 readLock.lock();
 try {
  int numInstancesFound = 0;
  for (LlapServiceInstance inst : activeInstances.getAll()) {
   Resource r = inst.getResource();
   memory += r.getMemory();
   vcores += r.getVirtualCores();
   numInstancesFound++;
  }
  if (LOG.isDebugEnabled()) {
   LOG.debug("GetTotalResources: numInstancesFound={}, totalMem={}, totalVcores={}",
     numInstancesFound, memory, vcores);
  }
 } finally {
  readLock.unlock();
 }
 return Resource.newInstance(memory, vcores);
}

Code example source: alibaba/jstorm

/**
 * Setup the request that will be sent to the RM for the container ask.
 *
 * @return the setup ResourceRequest to be sent to RM
 */
public ContainerRequest setupContainerAskForRM(int containerMemory, int containerVirtualCores, int priority, String host) {
  // setup requirements for hosts
  // using * as any host will do for the jstorm app
  // set the priority for the request
  Priority pri = Priority.newInstance(priority);
  // Set up resource type requirements
  // For now, memory and CPU are supported so we set memory and cpu requirements
  Resource capability = Resource.newInstance(containerMemory,
      containerVirtualCores);
  ContainerRequest request = new ContainerRequest(capability, null, null,
      pri);
  LOG.info("By Thrift Server Requested container  ask: " + request.toString());
  return request;
}

Code example source: apache/hive

public DynamicServiceInstance(ServiceRecord srv) throws IOException {
 super(srv, IPC_LLAP);
 final Endpoint shuffle = srv.getInternalEndpoint(IPC_SHUFFLE);
 final Endpoint mng = srv.getInternalEndpoint(IPC_MNG);
 final Endpoint outputFormat = srv.getInternalEndpoint(IPC_OUTPUTFORMAT);
 final Endpoint services = srv.getExternalEndpoint(IPC_SERVICES);
 this.mngPort =
   Integer.parseInt(RegistryTypeUtils.getAddressField(mng.addresses.get(0),
     AddressTypes.ADDRESS_PORT_FIELD));
 this.shufflePort =
   Integer.parseInt(RegistryTypeUtils.getAddressField(shuffle.addresses.get(0),
     AddressTypes.ADDRESS_PORT_FIELD));
 this.outputFormatPort =
   Integer.valueOf(RegistryTypeUtils.getAddressField(outputFormat.addresses.get(0),
     AddressTypes.ADDRESS_PORT_FIELD));
 this.serviceAddress =
   RegistryTypeUtils.getAddressField(services.addresses.get(0), AddressTypes.ADDRESS_URI);
 String memStr = srv.get(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname, "");
 String coreStr = srv.get(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname, "");
 try {
  this.resource = Resource.newInstance(Integer.parseInt(memStr), Integer.parseInt(coreStr));
 } catch (NumberFormatException ex) {
  throw new IOException("Invalid resource configuration for a LLAP node: memory "
    + memStr + ", vcores " + coreStr);
 }
}

Code example source: apache/hive

readLock.unlock();
return Resource.newInstance(memory, vcores);

Code example source: apache/flink

this.defaultTaskManagerMemoryMB = ConfigurationUtils.getTaskManagerHeapMemory(flinkConfig).getMebiBytes();
this.defaultCpus = flinkConfig.getInteger(YarnConfigOptions.VCORES, numberOfTaskSlots);
this.resource = Resource.newInstance(defaultTaskManagerMemoryMB, defaultCpus);

Code example source: alibaba/jstorm

Resource capability = Resource.newInstance(jstormClientContext.amMemory, jstormClientContext.amVCores);
appContext.setResource(capability);

Code example source: apache/hive

int memoryPerInstance = serviceInstance.getResource().getMemory();
int memoryPerExecutor = (int)(memoryPerInstance / (double) numVcores);
resourcePerExecutor = Resource.newInstance(memoryPerExecutor, 1);

Code example source: apache/metron

public static Resource toResource(EnumMap<Resources, Integer> resourceMap) {
  return Resource.newInstance(resourceMap.get(Resources.MEMORY), resourceMap.get(Resources.V_CORE));
}

Code example source: apache/flink

final YarnResourceManagerCallbackHandler callbackHandler = new YarnResourceManagerCallbackHandler();
AMRMClientAsync<AMRMClient.ContainerRequest> resourceManagerClient = mock(AMRMClientAsync.class);
doReturn(Collections.singletonList(Collections.nCopies(numInitialTaskManagers, new AMRMClient.ContainerRequest(Resource.newInstance(1024 * 1024, 1), null, null, Priority.newInstance(0)))))
  .when(resourceManagerClient).getMatchingRequests(any(Priority.class), anyString(), any(Resource.class));

Code example source: apache/metron

Resource capability = Resource.newInstance(amMemory, amVCores);
appContext.setResource(capability);

Code example source: org.apache.hadoop/hadoop-yarn-common

protected Node(NodeId nodeid) {
 labels = null;
 resource = Resource.newInstance(0, 0);
 running = false;
 nodeId = nodeid;
}

Code example source: org.apache.hadoop/hadoop-yarn-api

/**
 * Get a Resource object with the maximum allocation possible.
 * @return a Resource object with the maximum allocation for the scheduler
 */
public static Resource getResourceTypesMaximumAllocation() {
 Resource ret = Resource.newInstance(0, 0);
 for (ResourceInformation entry : resourceTypesArray) {
  ret.setResourceValue(entry.getName(),
    entry.getMaximumAllocation());
 }
 return ret;
}

Code example source: org.apache.hadoop/hadoop-yarn-common

public Resource divideAndCeil(Resource numerator, long denominator) {
 Resource ret = Resource.newInstance(numerator);
 int maxLength = ResourceUtils.getNumberOfKnownResourceTypes();
 for (int i = 0; i < maxLength; i++) {
  ResourceInformation resourceInformation = ret.getResourceInformation(i);
  resourceInformation
    .setValue(divideAndCeil(resourceInformation.getValue(), denominator));
 }
 return ret;
}

Code example source: linkedin/TonY

private AMRMClient.ContainerRequest setupContainerRequestForRM(TensorFlowContainerRequest request) {
 Priority priority = Priority.newInstance(request.getPriority());
 Resource capability = Resource.newInstance(request.getMemory(), request.getVCores());
 Utils.setCapabilityGPU(capability, request.getGPU());
 session.addAllocationId(request.getJobName(), lastAllocationRequestId);
 AMRMClient.ContainerRequest containerRequest = new AMRMClient.ContainerRequest(capability, null, null, priority,
   lastAllocationRequestId++);
 LOG.info("Requested container ask: " + containerRequest.toString());
 return containerRequest;
}
