Usage of the org.apache.hadoop.ipc.Server Class, with Code Examples

This article collects Java code examples for the org.apache.hadoop.ipc.Server class and shows how the class is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, extracted from selected projects, so they should serve as useful references. Details of the Server class:
Package: org.apache.hadoop.ipc
Class name: Server

Introduction to Server

An abstract IPC service. IPC calls take a single Writable as a parameter, and return a Writable as their value. A service runs on a port and is defined by a parameter class and a value class.
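
Before the examples, here is a minimal sketch of the Server lifecycle that most snippets below follow. This is illustrative only, not from any of the cited projects: EchoProtocol and echoImpl are assumed names, and the builder shown is the RPC.Builder API available since Hadoop 2.x.

// Hypothetical sketch: build, start, resolve the bound address, stop.
// EchoProtocol / echoImpl are assumed; substitute a real protocol
// interface and its server-side implementation.
Configuration conf = new Configuration();
Server server = new RPC.Builder(conf)
  .setProtocol(EchoProtocol.class) // parameter and return types are Writables
  .setInstance(echoImpl)           // server-side implementation of the protocol
  .setBindAddress("0.0.0.0")
  .setPort(0)                      // 0 = bind an ephemeral port
  .setNumHandlers(4)               // handler threads that execute calls
  .build();
server.start();                    // begin accepting IPC connections
InetSocketAddress addr = NetUtils.getConnectAddress(server); // actual host:port
// ... serve calls ...
server.stop();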

Code examples

Example source: Qihoo360/XLearning

protected void serviceStart() throws Exception {
 Configuration conf = new XLearningConfiguration();
 YarnRPC rpc = YarnRPC.create(conf);
 initializeWebApp(conf);
 InetSocketAddress address = conf.getSocketAddr(
   XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
   XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
   conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
   conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_PORT, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_PORT));
 server =
   rpc.getServer(HSClientProtocol.class, protocolHandler, address,
     conf, jhsDTSecretManager,
     conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_CLIENT_THREAD_COUNT,
       XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_CLIENT_THREAD_COUNT));
 // Enable service authorization?
 if (conf.getBoolean(
   CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
   false)) {
  server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
 }
 server.start();
 this.bindAddress = conf.updateConnectAddr(XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
   XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
   conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS, XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
   server.getListenerAddress());
 LOG.info("Instantiated HistoryClientService at " + this.bindAddress);
 super.serviceStart();
}
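
A note on the authorization block above: refreshServiceAcl() only takes effect when hadoop.security.authorization is enabled. A minimal hedged sketch of turning it on programmatically (in practice this is usually set in core-site.xml):

// Hypothetical: enable service-level authorization so that
// server.refreshServiceAcl(conf, policyProvider) is actually applied.
Configuration conf = new Configuration();
conf.setBoolean(
  CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);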

Example source: apache/hive

public void shutdownServer() {
 if (started.get()) { // Primarily to avoid multiple shutdowns.
  started.set(false);
  server.stop();
 }
}

Example source: org.apache.hadoop/hadoop-common

/** Returns remote address as a string when invoked inside an RPC.
 *  Returns null in case of an error.
 */
public static String getRemoteAddress() {
 InetAddress addr = getRemoteIp();
 return (addr == null) ? null : addr.getHostAddress();
}
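
A usage sketch: the remote address is derived from the RPC call currently being handled on the server thread, so it is only meaningful inside a server-side handler method. The method name whoCalledMe and the LOG field are assumptions for illustration:

// Hypothetical handler method: log the caller of the current RPC.
// Outside an RPC handler thread, getRemoteAddress() returns null.
public String whoCalledMe() {
 String caller = Server.getRemoteAddress();
 LOG.info("RPC received from " + (caller == null ? "<not inside an RPC>" : caller));
 return caller;
}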

Example source: apache/hive

public LlapTaskUmbilicalServer(Configuration conf, LlapTaskUmbilicalProtocol umbilical, int numHandlers) throws IOException {
 jobTokenSecretManager = new JobTokenSecretManager();
 server = new RPC.Builder(conf)
   .setProtocol(LlapTaskUmbilicalProtocol.class)
   .setBindAddress("0.0.0.0")
   .setPort(0)
   .setInstance(umbilical)
   .setNumHandlers(numHandlers)
   .setSecretManager(jobTokenSecretManager).build();
 if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
  server.refreshServiceAcl(conf, new LlapUmbilicalExternalPolicyProvider());
 }
 server.start();
 this.address = NetUtils.getConnectAddress(server);
 LOG.info(
   "Started TaskUmbilicalServer: " + umbilical.getClass().getName() + " at address: " + address +
   " with numHandlers=" + numHandlers);
}

Example source: org.apache.hadoop/hadoop-common

RpcMetrics(Server server, Configuration conf) {
 String port = String.valueOf(server.getListenerAddress().getPort());
 name = "RpcActivityForPort" + port;
 this.server = server;
 registry = new MetricsRegistry("rpc")
   .tag("port", "RPC port", port)
   .tag("serverName", "Name of the RPC server", server.getServerName());
 int[] intervals = conf.getInts(
   CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY);
 rpcQuantileEnable = (intervals.length > 0) && conf.getBoolean(
   CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,
   CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT);
 // ... (rest of the constructor elided)
}

Example source: ch.cern.hadoop/hadoop-common

@Test
public void testEmptyConfig() throws Exception {
 Configuration conf = new Configuration();
 conf.set("TestRange", "");
 ServerSocket socket = new ServerSocket();
 InetSocketAddress address = new InetSocketAddress("0.0.0.0", 0);
 try {
  Server.bind(socket, address, 10, conf, "TestRange");
  assertTrue(socket.isBound());
 } finally {
  socket.close();
 }
}
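
The same Server.bind overload can also restrict binding to a configured port range. A hedged sketch, reusing the "TestRange" key from the test above with an illustrative range value:

// Hypothetical: with a range such as "50000-50099" configured, bind()
// should pick a free port inside that range.
Configuration conf = new Configuration();
conf.set("TestRange", "50000-50099");
ServerSocket socket = new ServerSocket();
try {
 Server.bind(socket, new InetSocketAddress("0.0.0.0", 0), 10, conf, "TestRange");
 System.out.println("Bound within range: " + socket.getLocalPort());
} finally {
 socket.close();
}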

Example source: org.apache.hadoop/hadoop-common

public Listener() throws IOException {
 address = new InetSocketAddress(bindAddress, port);
 // Create a new server socket and set to non blocking mode
 acceptChannel = ServerSocketChannel.open();
 acceptChannel.configureBlocking(false);
 // Bind the server socket to the local host and port
 bind(acceptChannel.socket(), address, backlogLength, conf, portRangeConfig);
 port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
  // Create a selector
  selector = Selector.open();
 readers = new Reader[readThreads];
 for (int i = 0; i < readThreads; i++) {
  Reader reader = new Reader(
    "Socket Reader #" + (i + 1) + " for port " + port);
  readers[i] = reader;
  reader.start();
 }
 // Register accepts on the server socket with the selector.
 acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
 this.setName("IPC Server listener on " + port);
 this.setDaemon(true);
}

Example source: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Initialize the server
 * 
 * @param address hostname:port to bind to
 * @param conf the configuration
 */
private void initialize(String address, Configuration conf) throws IOException {
 InetSocketAddress socAddr = NameNode.getAddress(address);
 this.supportAppends = conf.getBoolean("dfs.support.append", true);
 this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
 this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
               handlerCount, false, conf);
 // The rpc-server port can be ephemeral... ensure we have the correct info
 this.nameNodeAddress = this.server.getListenerAddress(); 
 FileSystem.setDefaultUri(conf, getUri(nameNodeAddress));
 LOG.info("Namenode up at: " + this.nameNodeAddress);
 myMetrics = new NameNodeMetrics(conf, this);
 this.namesystem = new FSNamesystem(this, conf);
 this.server.start();  //start RPC server   
 this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
 this.emptier.setDaemon(true);
 this.emptier.start();
}

Example source: com.github.jiayuhan-it/hadoop-common

private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
 Configuration conf = new Configuration();
 try {
  RPC.setProtocolEngine(conf,
    HAServiceProtocolPB.class, ProtobufRpcEngine.class);
  HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
    new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
  BlockingService haPbService = HAServiceProtocolService
    .newReflectiveBlockingService(haServiceProtocolXlator);
  Server server = new RPC.Builder(conf)
    .setProtocol(HAServiceProtocolPB.class)
    .setInstance(haPbService)
    .setBindAddress(serverAddress.getHostName())
    .setPort(serverAddress.getPort()).build();
  server.start();
  return NetUtils.getConnectAddress(server);
 } catch (IOException e) {
  return null;
 }
}

Example source: org.apache.hadoop/hadoop-common-test

static void testKerberosRpc(String principal, String keytab) throws Exception {
 final Configuration newConf = new Configuration(conf);
 newConf.set(SERVER_PRINCIPAL_KEY, principal);
 newConf.set(SERVER_KEYTAB_KEY, keytab);
 SecurityUtil.login(newConf, SERVER_KEYTAB_KEY, SERVER_PRINCIPAL_KEY);
 TestUserGroupInformation.verifyLoginMetrics(1, 0);
 UserGroupInformation current = UserGroupInformation.getCurrentUser();
 System.out.println("UGI: " + current);
 Server server = RPC.getServer(TestSaslProtocol.class, new TestSaslImpl(),
   ADDRESS, 0, 5, true, newConf, null);
 TestSaslProtocol proxy = null;
 server.start();
 InetSocketAddress addr = NetUtils.getConnectAddress(server);
 try {
  proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  proxy.ping();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
 System.out.println("Test is successful.");
}

Example source: org.apache.hadoop/hadoop-yarn-server-common

@BeforeClass
public static void start() {
 InetSocketAddress address = new InetSocketAddress(0);
 Configuration configuration = new Configuration();
 ResourceTracker instance = new ResourceTrackerTestImpl();
 server = RpcServerFactoryPBImpl.get().getServer(ResourceTracker.class,
   instance, address, configuration, null, 1);
 server.start();
 client = (ResourceTracker) RpcClientFactoryPBImpl.get().getClient(
   ResourceTracker.class, 1, NetUtils.getConnectAddress(server),
   configuration);
}

Example source: com.facebook.hadoop/hadoop-core

private void initIpcServer(Configuration conf) throws IOException {
 //init ipc server
 InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
   conf.get("dfs.datanode.ipc.address"));
 ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(), 
   conf.getInt("dfs.datanode.handler.count", 3), false, conf);
 ipcServer.start();
}

Example source: org.apache.hadoop/hadoop-common-test

private void doDigestRpc(Server server, TestTokenSecretManager sm)
  throws Exception {
 server.start();
 final UserGroupInformation current = UserGroupInformation.getCurrentUser();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
   .getUserName()));
 Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
   sm);
 Text host = new Text(addr.getAddress().getHostAddress() + ":"
   + addr.getPort());
 token.setService(host);
 LOG.info("Service IP address for token is " + host);
 current.addToken(token);
 TestSaslProtocol proxy = null;
 try {
  proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, conf);
  //QOP must be auth
  Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
  proxy.ping();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}

Example source: org.apache.hadoop/hadoop-common-test

@Test
public void testRealUserIPNotSpecified() throws IOException {
 final Configuration conf = new Configuration();
 conf.setStrings(ProxyUsers
   .getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
 Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS,
   0, 2, false, conf, null);
 try {
  server.start();
  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  UserGroupInformation realUserUgi = UserGroupInformation
    .createRemoteUser(REAL_USER_NAME);
  UserGroupInformation proxyUserUgi = UserGroupInformation
    .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
  String retVal = proxyUserUgi
    .doAs(new PrivilegedExceptionAction<String>() {
     public String run() throws IOException {
      proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
        TestProtocol.versionID, addr, conf);
      return proxy.aMethod();
     }
    });
  // No proxy-user IP list is configured, so the call must be rejected.
  Assert.fail("The RPC must have failed: " + retVal);
 } catch (Exception e) {
  e.printStackTrace();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}

Example source: org.apache.hadoop/hadoop-mapred-test

/**
 * Test {@link AuditLogger} with IP set.
 */
public void testAuditLoggerWithIP() throws Exception {
 Configuration conf = new Configuration();
 // start the IPC server
 Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf);
 server.start();

 InetSocketAddress addr = NetUtils.getConnectAddress(server);

 // Make a client connection and test the audit log
 TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
             TestProtocol.versionID, addr, conf);
 // Start the testcase
 proxy.ping();

 server.stop();
}

Example source: ch.cern.hadoop/hadoop-hdfs

@Test
public void testBlockTokenRpc() throws Exception {
 Configuration conf = new Configuration();
 conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
 UserGroupInformation.setConfiguration(conf);
 
 BlockTokenSecretManager sm = new BlockTokenSecretManager(
   blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
 Token<BlockTokenIdentifier> token = sm.generateToken(block3,
   EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
 final Server server = createMockDatanode(sm, token, conf);
 server.start();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 final UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(block3.toString());
 ticket.addToken(token);
 ClientDatanodeProtocol proxy = null;
 try {
  proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
    NetUtils.getDefaultSocketFactory(conf));
  assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}

Example source: com.facebook.hadoop/hadoop-core

@Override // DataNodeMXBean
public String getRpcPort(){
 return Integer.toString(this.ipcServer.getListenerAddress().getPort());
}

Example source: ch.cern.hadoop/hadoop-hdfs

/** Test to verify that an InterDatanode RPC times out as expected when
 *  the server DN does not respond.
 */
@Test(expected=SocketTimeoutException.class)
public void testInterDNProtocolTimeout() throws Throwable {
 final Server server = new TestServer(1, true);
 server.start();

 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
 InterDatanodeProtocol proxy = null;

 try {
  proxy = DataNode.createInterDataNodeProtocolProxy(
    dInfo, conf, 500, false);
  proxy.initReplicaRecovery(new RecoveringBlock(
    new ExtendedBlock("bpid", 1), null, 100));
  fail("Expected a SocketTimeoutException, but none was thrown.");
 } finally {
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
  server.stop();
 }
}

Example source: ch.cern.hadoop/hadoop-common

@Test
public void testServerAddress() throws IOException {
 Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
   .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
   .setNumHandlers(5).setVerbose(true).build();
 InetSocketAddress bindAddr = null;
 try {
  bindAddr = NetUtils.getConnectAddress(server);
 } finally {
  server.stop();
 }
 assertEquals(InetAddress.getLocalHost(), bindAddr.getAddress());
}

Example source: com.facebook.hadoop/hadoop-core

public Job(JobID jobid, JobConf conf) throws IOException {
 this.doSequential =
  conf.getBoolean("mapred.localrunner.sequential", true);
 this.id = jobid;
 this.mapoutputFile = new MapOutputFile(jobid);
 this.mapoutputFile.setConf(conf);
 this.localFile = new JobConf(conf).getLocalPath(jobDir+id+".xml");
 this.localFs = FileSystem.getLocal(conf);
 persistConf(this.localFs, this.localFile, conf);
 this.job = new JobConf(localFile);
 profile = new JobProfile(job.getUser(), id, localFile.toString(), 
              "http://localhost:8080/", job.getJobName());
 status = new JobStatus(id, 0.0f, 0.0f, JobStatus.RUNNING);
 jobs.put(id, this);
 numSlots = conf.getInt(LOCAL_RUNNER_SLOTS, DEFAULT_LOCAL_RUNNER_SLOTS);
 executor = Executors.newFixedThreadPool(numSlots);
 int handlerCount = numSlots;
 umbilicalServer =
  RPC.getServer(this, LOCALHOST, 0, handlerCount, false, conf);
 umbilicalServer.start();
 umbilicalPort = umbilicalServer.getListenerAddress().getPort();
 this.start();
}
