org.apache.hadoop.ipc.RPC.getServer()方法的使用及代码示例

x33g5p2x  于2022-01-28 转载在 其他  
字(11.9k)|赞(0)|评价(0)|浏览(130)

本文整理了Java中org.apache.hadoop.ipc.RPC.getServer方法的一些代码示例,展示了RPC.getServer的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。RPC.getServer方法的具体详情如下:
包路径:org.apache.hadoop.ipc.RPC
类名称:RPC
方法名:getServer

RPC.getServer介绍

[英]Construct a server for a protocol implementation instance listening on a port and address.
[中]为监听端口和地址的协议实现实例构造服务器。

代码示例

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Convenience overload: constructs a server for a protocol implementation
 * instance listening on the given address and port, using a single handler
 * thread with verbose logging disabled.
 */
public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf)
 throws IOException {
 // Delegate to the full overload with default handler count and verbosity.
 final int defaultHandlerCount = 1;
 final boolean verbose = false;
 return getServer(instance, bindAddress, port, defaultHandlerCount, verbose, conf);
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/** Single-handler, non-verbose variant of the full {@code getServer} overload. */
public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf)
 throws IOException {
 return getServer(instance, bindAddress, port, /* numHandlers */ 1, /* verbose */ false, conf);
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Creates and starts the datanode's IPC server, bound to the address in
 * {@code dfs.datanode.ipc.address} with the handler-thread count from
 * {@code dfs.datanode.handler.count} (default 3).
 */
private void initIpcServer(Configuration conf) throws IOException {
 final InetSocketAddress bindAddr =
   NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
 final int handlerThreads = conf.getInt("dfs.datanode.handler.count", 3);
 ipcServer = RPC.getServer(this, bindAddr.getHostName(), bindAddr.getPort(),
   handlerThreads, false, conf);
 ipcServer.start();
}

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Builds an RPC server backed by a Mockito mock of ClientDatanodeProtocol.
 * The mock reports the real protocol version and answers
 * getReplicaVisibleLength via a getLengthAnswer built from the identifier
 * decoded out of the supplied block token.
 */
private Server createMockDatanode(BlockTokenSecretManager sm,
  Token<BlockTokenIdentifier> token) throws IOException {
 final ClientDatanodeProtocol datanode = mock(ClientDatanodeProtocol.class);
 when(datanode.getProtocolVersion(anyString(), anyLong()))
   .thenReturn(ClientDatanodeProtocol.versionID);
 // Recover the token identifier from the token's serialized bytes.
 final BlockTokenIdentifier identifier = sm.createIdentifier();
 identifier.readFields(
   new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
 doAnswer(new getLengthAnswer(sm, identifier))
   .when(datanode).getReplicaVisibleLength(any(Block.class));
 return RPC.getServer(ClientDatanodeProtocol.class, datanode, ADDRESS, 0, 5,
   true, conf, sm);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/** Exercises token (DIGEST) authenticated RPC end to end. */
@Test
public void testDigestRpc() throws Exception {
 final TestTokenSecretManager tokenManager = new TestTokenSecretManager();
 final Server rpcServer = RPC.getServer(TestSaslProtocol.class,
   new TestSaslImpl(), ADDRESS, 0, 5, true, conf, tokenManager);
 doDigestRpc(rpcServer, tokenManager);
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Initialize the NameNode's RPC server and supporting services.
 *
 * Binds the RPC server, publishes the actual listener address back into the
 * configuration (the configured port may have been 0/ephemeral), creates
 * metrics and the FSNamesystem, then starts the RPC server and the
 * trash-emptier daemon thread.
 *
 * @param address hostname:port to bind to
 * @param conf the configuration
 * @throws IOException if the server or the namesystem cannot be created
 */
private void initialize(String address, Configuration conf) throws IOException {
 InetSocketAddress socAddr = NameNode.getAddress(address);
 this.supportAppends = conf.getBoolean("dfs.support.append", true);
 this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
 this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
               handlerCount, false, conf);
 // The rpc-server port can be ephemeral... ensure we have the correct info
 this.nameNodeAddress = this.server.getListenerAddress(); 
 FileSystem.setDefaultUri(conf, getUri(nameNodeAddress));
 LOG.info("Namenode up at: " + this.nameNodeAddress);
 myMetrics = new NameNodeMetrics(conf, this);
 this.namesystem = new FSNamesystem(this, conf);
 this.server.start();  //start RPC server   
 // Daemon thread that periodically purges expired trash.
 this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
 this.emptier.setDaemon(true);
 this.emptier.start();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Exercises Kerberos-authenticated RPC: logs in from the given keytab,
 * starts a SASL-enabled server on an ephemeral port, and pings it through
 * a client proxy.
 *
 * @param principal Kerberos principal to log in as
 * @param keytab path to the keytab file holding that principal's key
 */
static void testKerberosRpc(String principal, String keytab) throws Exception {
 final Configuration newConf = new Configuration(conf);
 newConf.set(SERVER_PRINCIPAL_KEY, principal);
 newConf.set(SERVER_KEYTAB_KEY, keytab);
 // Log in from the keytab BEFORE creating the server so the server runs
 // under the Kerberos identity.
 SecurityUtil.login(newConf, SERVER_KEYTAB_KEY, SERVER_PRINCIPAL_KEY);
 TestUserGroupInformation.verifyLoginMetrics(1, 0);
 UserGroupInformation current = UserGroupInformation.getCurrentUser();
 System.out.println("UGI: " + current);
 // Port 0 = OS-assigned ephemeral port; 5 handlers; verbose logging on.
 Server server = RPC.getServer(TestSaslProtocol.class, new TestSaslImpl(),
   ADDRESS, 0, 5, true, newConf, null);
 TestSaslProtocol proxy = null;
 server.start();
 // Resolve the actual bound address (port was ephemeral).
 InetSocketAddress addr = NetUtils.getConnectAddress(server);
 try {
  proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  proxy.ping();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
 System.out.println("Test is successful.");
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Starts the RPC endpoint that datanodes talk to.  If a dedicated datanode
 * protocol address is configured, a separate server is bound there;
 * otherwise the shared client-request server is started instead.  Finishes
 * by launching the trash emptier.
 */
public void startDNServer() throws IOException {
 InetSocketAddress dnAddr = NameNode.getDNProtocolAddress(getConf());
 int handlerCount = getConf().getInt("dfs.namenode.handler.count", 10);
 
 if (dnAddr != null) {
  // Dedicated server for datanode traffic; its handler count falls back
  // to the general namenode handler count when unset.
  int dnHandlerCount =
   getConf().getInt(DATANODE_PROTOCOL_HANDLERS, handlerCount);
  this.dnProtocolServer = RPC.getServer(this, dnAddr.getHostName(),
                     dnAddr.getPort(), dnHandlerCount,
                     false, getConf());
  // The configured port may have been ephemeral; publish the bound one.
  this.dnProtocolAddress = dnProtocolServer.getListenerAddress();
  NameNode.setDNProtocolAddress(getConf(), 
    dnProtocolAddress.getHostName() + ":" + dnProtocolAddress.getPort());
  LOG.info("Datanodes endpoint is up at: " + this.dnProtocolAddress);
 }
 
 if (this.dnProtocolServer != null) {
  this.dnProtocolServer.start();
 } else {
  // No dedicated endpoint: serve datanodes from the client RPC server.
  this.startServerForClientRequests();
 }
 startTrashEmptier(getConf()); 
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Verifies that a server created with security enabled still serves
 * token-based calls after security is explicitly disabled.
 */
@Test
public void testSecureToInsecureRpc() throws Exception {
 final Server rpcServer = RPC.getServer(TestSaslProtocol.class,
   new TestSaslImpl(), ADDRESS, 0, 5, true, conf, null);
 rpcServer.disableSecurity();
 final TestTokenSecretManager tokenManager = new TestTokenSecretManager();
 doDigestRpc(rpcServer, tokenManager);
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Creates a local-runner job: persists the job configuration to the local
 * filesystem, registers the job, sizes the task executor from
 * LOCAL_RUNNER_SLOTS, starts the umbilical RPC server used by child tasks,
 * and finally starts this thread.
 *
 * @param jobid id of the job to run
 * @param conf job configuration
 * @throws IOException if the configuration cannot be persisted or the
 *         umbilical server cannot be created
 */
public Job(JobID jobid, JobConf conf) throws IOException {
 this.doSequential =
  conf.getBoolean("mapred.localrunner.sequential", true);
 this.id = jobid;
 this.mapoutputFile = new MapOutputFile(jobid);
 this.mapoutputFile.setConf(conf);
 this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml");
 this.localFs = FileSystem.getLocal(conf);
 // Write the conf to disk, then reload so this.job reflects the persisted
 // copy rather than the in-memory one.
 persistConf(this.localFs, this.localFile, conf);
 this.job = new JobConf(localFile);
 profile = new JobProfile(job.getUser(), id, localFile.toString(), 
              "http://localhost:8080/", job.getJobName());
 status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING);
 jobs.put(id, this);
 numSlots = conf.getInt(LOCAL_RUNNER_SLOTS, DEFAULT_LOCAL_RUNNER_SLOTS);
 executor = Executors.newFixedThreadPool(numSlots);
 int handlerCount = numSlots;
 // Port 0: let the OS pick; the actual port is read back just below.
 umbilicalServer =
  RPC.getServer(this, LOCALHOST, 0, handlerCount, false, conf);
 umbilicalServer.start();
 umbilicalPort = umbilicalServer.getListenerAddress().getPort();
 this.start();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Start an RPC server authenticated via delegation tokens.  Logs in from
 * the keytab, creates and starts a delegation-token secret manager, then
 * brings up the MiniProtocol server on the default address.
 *
 * @param conf configuration for UGI and the RPC server
 * @param user Kerberos principal to log in as
 * @param keytabFile keytab holding that principal's credentials
 */
MiniServer(Configuration conf, String user, String keytabFile)
throws IOException {
 UserGroupInformation.setConfiguration(conf);
 UserGroupInformation.loginUserFromKeytab(user, keytabFile);
 // Millisecond lifetimes (24h / 7d / 24h / 1h); exact parameter meanings
 // are defined by TestDelegationTokenSecretManager — verify there.
 secretManager = 
  new TestDelegationTokenSecretManager(24*60*60*1000,
    7*24*60*60*1000,24*60*60*1000,3600000);
 secretManager.startThreads();
 rpcServer = RPC.getServer(MiniProtocol.class,
   this, DEFAULT_SERVER_ADDRESS, 0, 1, false, conf, secretManager);
 rpcServer.start();
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapred-test

/**
  * Test {@link AuditLogger} with IP set.
  *
  * Starts a local RPC server, makes one client call so the audit log
  * records the caller's IP, then tears down both ends.
  */
 public void testAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server on an ephemeral port
  Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf);
  server.start();

  TestProtocol proxy = null;
  try {
   InetSocketAddress addr = NetUtils.getConnectAddress(server);

   // Make a client connection and test the audit log
   proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
               TestProtocol.versionID, addr, conf);
   // Start the testcase
   proxy.ping();
  } finally {
   // Always release the client connection and stop the server, even when
   // ping() throws; the original leaked both on any exception.
   if (proxy != null) {
    RPC.stopProxy(proxy);
   }
   server.stop();
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

// NOTE(review): fragment from a larger test — 'conf', 'ADDRESS', 'TestImpl'
// and REAL_USER_SHORT_NAME are defined by surrounding code not shown here.
// Allow the real user to proxy for members of "group1".
conf.setStrings(ProxyUsers.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
  "group1");
// Ephemeral port, 2 handler threads, verbose off, no secret manager.
Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS,
  0, 2, false, conf, null);

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Lazily creates and starts the RPC server that handles client requests.
 * Does nothing if the server already exists.
 */
public void startServerForClientRequests() throws IOException {
 if (this.server == null) {
  InetSocketAddress socAddr = NameNode.getAddress(getConf());
  int handlerCount = getConf().getInt("dfs.namenode.handler.count", 10); 
  
  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                handlerCount, false, getConf());
  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress();
  FileSystem.setDefaultUri(getConf(), getUri(serverAddress));
  if (this.httpServer != null) {
   // This means the server is being started once out of safemode
   // and jetty is initialized already
   this.httpServer.setAttribute("name.node.address", getNameNodeAddress());
  }
  LOG.info("Namenode up at: " + this.serverAddress);
  
  
  this.server.start();
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

// NOTE(review): fragment from a larger test — 'ADDRESS', 'TestImpl' and
// 'configureSuperUserIPAddresses' come from surrounding code not shown here.
final Configuration conf = new Configuration();
// Register the superuser's allowed source IPs before starting the server.
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
// Ephemeral port, 2 handler threads, verbose off, no secret manager.
Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS,
  0, 2, false, conf, null);

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Verifies that a failing token secret manager surfaces the expected error
 * message to the client as a RemoteException wrapping InvalidToken.
 */
@Test
public void testErrorMessage() throws Exception {
 final BadTokenSecretManager badManager = new BadTokenSecretManager();
 final Server rpcServer = RPC.getServer(TestSaslProtocol.class,
   new TestSaslImpl(), ADDRESS, 0, 5, true, conf, badManager);
 boolean sawExpectedFailure = false;
 try {
  doDigestRpc(rpcServer, badManager);
 } catch (RemoteException re) {
  LOG.info("LOGGING MESSAGE: " + re.getLocalizedMessage());
  // Both the message text and the unwrapped exception type must match.
  assertTrue(ERROR_MESSAGE.equals(re.getLocalizedMessage()));
  assertTrue(re.unwrapRemoteException() instanceof InvalidToken);
  sawExpectedFailure = true;
 }
 assertTrue(sawExpectedFailure);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Round-trips echo() and add() calls over the Avro-specific RPC engine.
 * Starts a server on an ephemeral port, connects a proxy, asserts both
 * results, and releases proxy and server in all cases.
 */
public void testAvroSpecificRpc() throws Exception {
 Configuration conf = new Configuration();
 RPC.setProtocolEngine(conf, AvroSpecificTestProtocol.class, 
   AvroSpecificRpcEngine.class);
 Server server = RPC.getServer(AvroSpecificTestProtocol.class,
                new AvroSpecificTestProtocolImpl(), 
                ADDRESS, 0, conf);
 AvroSpecificTestProtocol proxy = null;
 try {
  server.start();
  InetSocketAddress addr = NetUtils.getConnectAddress(server);
  proxy =
   (AvroSpecificTestProtocol)RPC.getProxy(AvroSpecificTestProtocol.class, 
     0, addr, conf);
  
  CharSequence echo = proxy.echo("hello world");
  assertEquals("hello world", echo.toString());
  int intResult = proxy.add(1, 2);
  assertEquals(3, intResult);
 } finally {
  // Release the client connection too; the original stopped only the
  // server and leaked the proxy's connection (cf. testKerberosRpc which
  // calls RPC.stopProxy in its finally block).
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
  server.stop();
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

public void testCalls() throws Exception {
 Configuration conf = new Configuration();
 RPC.setProtocolEngine(conf, AvroTestProtocol.class, AvroRpcEngine.class);
 Server server = RPC.getServer(AvroTestProtocol.class,
                new TestImpl(), ADDRESS, 0, conf);
 AvroTestProtocol proxy = null;

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Initialize the HighTideNode: load and watch configuration, create
 * metrics, bind and start the RPC server, then launch the file-fixer and
 * trigger-monitor daemon threads.
 *
 * @param conf the configuration to run under
 */
private void initialize(Configuration conf) 
 throws IOException, SAXException, InterruptedException, 
     HighTideConfigurationException,
     ClassNotFoundException, ParserConfigurationException {
 this.conf = conf;
 InetSocketAddress socAddr = HighTideNode.getAddress(conf);
 int handlerCount = conf.getInt("fs.hightidenodenode.handler.count", 10);
 // read in the configuration
 configMgr = new ConfigManager(conf);
 configMgr.reloadConfigsIfNecessary();
 configMgr.startReload();
 // create Metrics object
 myMetrics = new HighTideNodeMetrics(conf, this);
 // create rpc server 
 this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
               handlerCount, false, conf);
 // The rpc-server port can be ephemeral... ensure we have the correct info
 this.serverAddress = this.server.getListenerAddress();
 LOG.info("HighTideNode up at: " + this.serverAddress);
 // Mark the node usable before accepting RPC traffic.
 initialized = true;
 running = true;
 this.server.start(); // start RPC server
 // Daemon that repairs files according to the configured policies.
 this.fileFixer = new FileFixer(conf);
 this.fileFixerThread = new Daemon(this.fileFixer);
 fileFixer.setPolicyInfo(configMgr.getAllPolicies());
 this.fileFixerThread.start();
 // start the deamon thread to resync if needed
 this.triggerThread = new Daemon(new TriggerMonitor());
 this.triggerThread.start();
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Verifies that Server#stop() terminates every reader thread the server
 * started: none before, some while running, none after.
 */
public void testStopsAllThreads() throws Exception {
 final String readerThreadName = "Server$Listener$Reader";
 assertEquals("Expect no Reader threads running before test",
  0, countThreads(readerThreadName));
 final Server rpcServer = RPC.getServer(TestProtocol.class,
   new TestImpl(), ADDRESS, 0, 5, true, conf, null);
 rpcServer.start();
 try {
  // While the server runs, at least one reader thread must exist.
  assertTrue(countThreads(readerThreadName) > 0);
 } finally {
  rpcServer.stop();
 }
 assertEquals("Expect no Reader threads left running after test",
  0, countThreads(readerThreadName));
}

相关文章