Usage of the org.apache.hadoop.mapred.QueueManager.<init>() method, with code examples


This article collects Java code examples for the org.apache.hadoop.mapred.QueueManager.<init> method and shows how QueueManager.<init> is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of the QueueManager.<init> method are as follows:
Package path: org.apache.hadoop.mapred.QueueManager
Class name: QueueManager
Method name: <init>

QueueManager.<init> introduction

Create an instance that supports hierarchical queues, defined in the configuration file that is passed in.

This is mainly used for testing purposes and should not be called from production code.
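
Before the individual examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) that strings together the constructor variants seen in the examples: building a QueueManager from a JobConf, from an explicit queue configuration file, and from the default classpath configuration. The class name QueueManagerInitSketch and the file path my-mapred-queues.xml are illustrative placeholders, and the class is declared in the org.apache.hadoop.mapred package, as the tests below are, because the file-based constructor is test-oriented and may not be accessible from other packages.

package org.apache.hadoop.mapred;

import java.util.Set;

// Hypothetical sketch: exercises the QueueManager constructor variants used
// in the examples below. It lives in org.apache.hadoop.mapred, like the
// tests, because some of these constructors are intended for testing and may
// not be visible from other packages.
public class QueueManagerInitSketch {
 public static void main(String[] args) throws Exception {
  // Queues taken from the passed-in cluster configuration.
  JobConf conf = new JobConf();
  QueueManager fromConf = new QueueManager(conf);
  System.out.println("Leaf queues from conf: " + fromConf.getLeafQueueNames());

  // Queues defined in an explicit configuration file (the test-only variant
  // described above); "my-mapred-queues.xml" is a placeholder and must point
  // to an existing queue configuration file.
  QueueManager fromFile = new QueueManager("my-mapred-queues.xml", true);
  System.out.println("Root children: " + fromFile.getRoot().getChildren().size());

  // No-argument variant: falls back to the queue configuration found on the
  // classpath (see testMissingConfigFile below).
  QueueManager fromClasspath = new QueueManager();
  Set<String> leaves = fromClasspath.getLeafQueueNames();
  System.out.println("Leaf queues from classpath: " + leaves);
 }
}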

Code examples

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

@Test (timeout=5000)
public void testDefaultConfig() {
 QueueManager manager = new QueueManager(true);
 assertEquals(manager.getRoot().getChildren().size(), 2);
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

public FakeTaskTrackerManager() {
 JobConf conf = new JobConf();
 queueManager = new QueueManager(conf);
 trackers.put("tt1", new TaskTrackerStatus("tt1", "tt1.host", 1,
        new ArrayList<TaskStatus>(), 0,
        maxMapTasksPerTracker, maxReduceTasksPerTracker));
}

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

Configuration conf = getConfiguration();
QueueManager manager = new QueueManager(conf);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

f = writeFile();
QueueManager manager = new QueueManager(f.getCanonicalPath(), true);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

public void testMultipleQueues() {
 JobConf conf = new JobConf();
 conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY,
   "q1,q2,Q3");
 QueueManager qMgr = new QueueManager(conf);
 Set<String> expQueues = new TreeSet<String>();
 expQueues.add("q1");
 expQueues.add("q2");
 expQueues.add("Q3");
 verifyQueues(expQueues, qMgr.getLeafQueueNames());
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

private void setupConfForNoAccess() throws Exception {
 currentUGI = UserGroupInformation.getLoginUser();
 String userName = currentUGI.getUserName();
 String[] queueNames = {"qu1", "qu2"};
 // Only user u1 has access for queue qu1
 // Only group g2 has acls for the queue qu2
 createQueuesConfigFile(
   queueNames, new String[]{"u1", " g2"}, new String[]{"u1", " g2"});
 conf = new JobConf();
 conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
 queueManager = new QueueManager(conf);
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

public FakeTaskTrackerManager() {
 JobConf conf = new JobConf();
 queueManager = new QueueManager(conf);
 
 TaskTracker tt1 = new TaskTracker("tt1");
 tt1.setStatus(new TaskTrackerStatus("tt1", "tt1.host", 1,
                   new ArrayList<TaskStatus>(), 0,
                   maxMapTasksPerTracker, 
                   maxReduceTasksPerTracker));
 trackers.put("tt1", tt1);
 
 TaskTracker tt2 = new TaskTracker("tt2");
 tt2.setStatus(new TaskTrackerStatus("tt2", "tt2.host", 2,
                   new ArrayList<TaskStatus>(), 0,
                   maxMapTasksPerTracker, 
                   maxReduceTasksPerTracker));
 trackers.put("tt2", tt2);
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testEmptyFile() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 try {
  new QueueManager(QUEUES_CONFIG_FILE_PATH, true);
  fail("Should throw an exception as configuration is wrong ");
 } catch (Exception re) {
  re.printStackTrace();
  LOG.info(re.getMessage());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

public void testSchedulerInfo() {
 JobConf conf = new JobConf();
 conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY,
   "qq1,qq2");
 QueueManager qMgr = new QueueManager(conf);
 qMgr.setSchedulerInfo("qq1", "queueInfoForqq1");
 qMgr.setSchedulerInfo("qq2", "queueInfoForqq2");
 assertEquals(qMgr.getSchedulerInfo("qq2"), "queueInfoForqq2");
 assertEquals(qMgr.getSchedulerInfo("qq1"), "queueInfoForqq1");
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testDefault() throws Exception {
 deleteQueuesConfigFile();
 QueueManager qm = new QueueManager();
 Queue root = qm.getRoot();
 assertEquals(root.getChildren().size(), 1);
 assertEquals(root.getChildren().iterator().next().getName(), "default");
 assertNull(root.getChildren().iterator().next().getChildren());
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testInvalidName() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 Element queues = createQueuesNode(doc);
 Element q1 = createQueue(doc, "");
 queues.appendChild(q1);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 try {
  new QueueManager(QUEUES_CONFIG_FILE_PATH, false);
  fail("Should throw an exception as configuration is wrong ");
 } catch (Exception re) {
  re.printStackTrace();
  LOG.info(re.getMessage());
 }
 deleteQueuesConfigFile();
 doc = createDocument();
 queues = createQueuesNode(doc);
 q1 = doc.createElement("queue");
 queues.appendChild(q1);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 try {
  new QueueManager(QUEUES_CONFIG_FILE_PATH, true);
  fail("Should throw an exception as configuration is wrong ");
 } catch (RuntimeException re) {
  re.printStackTrace();
  LOG.info(re.getMessage());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

/**
 * Test to verify that the refresh of queue properties fails if scheduler
 * fails to reload itself.
 * 
 * @throws Exception
 */
// @Test
public void testRefreshWithSchedulerFailure()
  throws Exception {
 JobQueueInfo[] queues = getSimpleQueueHierarchy();
 // write the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 QueueManager qManager = new QueueManager();
 // No change in configuration. Just Refresh the QueueManager and make sure
 // it fails.
 try {
  qManager.refreshQueues(null,
    new MyTaskScheduler().new MyFailingQueueRefresher());
  fail("Queue-refresh should have failed!");
 } catch (Exception e) {
  // Refresh failed as expected. Check the error message.
  assertTrue(
    "Exception message should point to a refresh-failure in scheduler!",
    e.getMessage().contains(
      QueueManager.MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE));
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testValidation() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 Element queues = createQueuesNode(doc);
 Element q1 = createQueue(doc, "q1");
 q1.appendChild(createAcls(doc, "acl-submit-job", "u1"));
 q1.appendChild(createAcls(doc, "acl-administer-jobs", "u2"));
 q1.appendChild(createQueue(doc, "p15"));
 q1.appendChild(createQueue(doc, "p16"));
 queues.appendChild(q1);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 try {
  new QueueManager(QUEUES_CONFIG_FILE_PATH, false);
  fail("Should throw an exception as configuration is wrong ");
 } catch (RuntimeException re) {
  LOG.info(re.getMessage());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testMissingConfigFile() throws Exception {
 deleteQueuesConfigFile(); // deletes file
 try {
  new QueueManager(QUEUES_CONFIG_FILE_PATH, true);
  fail("Should throw an exception for missing file when " +
     "explicitly passed.");
 } catch (RuntimeException re) {
 }
 // If we just want to pick up the queues from the class loader
 // it should fall through to the default. The class loader is set to
 // load CONFIG for the "mapred-queues.xml" resource, but it's missing
 // so should fall through to mapred-queues-default.xml
 QueueManager qm = new QueueManager();
 List<JobQueueInfo> rootQueues =
  qm.getRoot().getJobQueueInfo().getChildren();
 assertEquals(1, rootQueues.size());
 assertEquals("default", rootQueues.get(0).getQueueName());
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testRefreshWithInvalidFile() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 createSimpleDocument(doc);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, false);
 deleteQueuesConfigFile();
 doc = createDocument();
 Element queues = createQueuesNode(doc);
 Element q1 = createQueue(doc, "");
 queues.appendChild(q1);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 try {
  QueueConfigurationParser cp = new QueueConfigurationParser(QUEUES_CONFIG_FILE_PATH, false);
  fail("Should throw an exception as configuration is wrong ");
 } catch (Throwable re) {
  re.printStackTrace();
  LOG.info(re.getMessage());
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

/**
 * Test to verify that the scheduling information per queue in the
 * {@link QueueManager} is retained across queue-refresh.
 * 
 * @throws Exception
 */
@Test
public void testSchedulingInfoAfterRefresh()
  throws Exception {
 JobQueueInfo[] queues = getSimpleQueueHierarchy();
 // write the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 QueueManager qManager = new QueueManager();
 // Set some scheduling information for the queues in the QueueManager.
 for (String qName : qManager.getLeafQueueNames()) {
  qManager.setSchedulerInfo(qName, new String(
    "scheduling-information-for-queue-" + qName));
 }
 qManager.refreshQueues(null, null);
 // Verify that the scheduling information is retained across refresh.
 for (String qName : qManager.getLeafQueueNames()) {
  assertEquals("scheduling-information-for-queue-" + qName,
    qManager.getSchedulerInfo(qName));
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

/**
 * Test to verify that the refresh of queue properties fails if a new queue is
 * added.
 * 
 * @throws Exception
 */
@Test
public void testRefreshWithAddedQueues()
  throws Exception {
 JobQueueInfo[] queues = getSimpleQueueHierarchy();
 // write the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 QueueManager qManager = new QueueManager();
 JobQueueInfo newQueue =
   newJobQueueInfo(new ArrayList<JobQueueInfo>(), null, "q4",
     QueueState.UNDEFINED, null);
 queues[0].addChild(newQueue);
 // Rewrite the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 testRefreshFailureWithChangeOfHierarchy(qManager);
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testQueueView() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 createSimpleDocument(doc);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true);
 
 for (Queue queue : qm.getRoot().getChildren()) {
  checkHierarchy(queue, qm);
 }
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

@Test
public void testhasAccessForParent() throws Exception {
 deleteQueuesConfigFile();
 Document doc = createDocument();
 createSimpleDocument(doc);
 writeToFile(doc, QUEUES_CONFIG_FILE_PATH);
 QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true);
 UserGroupInformation ugi = createUGI("u1");
 assertFalse(qm.hasAccess("p1", QueueACL.SUBMIT_JOB, ugi));
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

/**
 * Test to verify that the refresh of queue properties fails if queues are
 * removed.
 * 
 * @throws Exception
 */
@Test
public void testRefreshWithRemovedQueues()
  throws Exception {
 JobQueueInfo[] queues = getSimpleQueueHierarchy();
 // write the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 QueueManager qManager = new QueueManager();
 // Remove queue[2]
 JobQueueInfo q2 = queues[2];
 queues[0].removeChild(q2);
 // Rewrite the configuration file
 writeQueueConfigurationFile(
   QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] });
 testRefreshFailureWithChangeOfHierarchy(qManager);
}
