Usage of the org.apache.hadoop.ipc.RPC class, with code examples


This article collects Java code examples for the org.apache.hadoop.ipc.RPC class and shows how the class is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of the RPC class:
Package path: org.apache.hadoop.ipc.RPC
Class name: RPC

Introduction to RPC

A simple RPC mechanism. A protocol is a Java interface. All parameters and return types must be one of:

  • a primitive type: boolean, byte, char, short, int, long, float, double, or void; or
  • a String; or
  • a Writable; or
  • an array of the above types.

All methods in the protocol should throw only IOException. No field data of the protocol instance is transmitted.
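To make these constraints concrete, here is a minimal client-side sketch. EchoProtocol, its versionID, and the echo() method are hypothetical names invented for illustration (with the legacy Writable engine, protocols usually also extend VersionedProtocol); only RPC.getProxy and RPC.stopProxy are the actual API under discussion.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

// Hypothetical protocol: parameters and return types stay within the
// allowed set (a String here), and every method throws only IOException.
interface EchoProtocol extends VersionedProtocol {
 long versionID = 1L;
 String echo(String message) throws IOException;
}

public class EchoClient {
 public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  InetSocketAddress addr = new InetSocketAddress("localhost", 50030);
  // Obtain a client-side proxy for the protocol.
  EchoProtocol proxy = (EchoProtocol) RPC.getProxy(EchoProtocol.class,
    EchoProtocol.versionID, addr, conf);
  try {
   System.out.println(proxy.echo("hello"));
  } finally {
   // Each proxy holds a cached connection; always release it.
   RPC.stopProxy(proxy);
  }
 }
}

The server-side counterpart (RPC.getServer in older Hadoop releases, RPC.Builder in newer ones) appears repeatedly in the examples below.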

Code examples

Code example source: apache/hive

synchronized void stopUmbilical() {
 if (umbilical != null) {
  RPC.stopProxy(umbilical);
 }
 umbilical = null;
}

Code example source: Qihoo360/XLearning

private static ApplicationMessageProtocol getAppMessageHandler(
  YarnConfiguration conf, String appMasterAddress, int appMasterPort) throws IOException {
 ApplicationMessageProtocol appMessageHandler = null;
 if (!StringUtils.isBlank(appMasterAddress) && !appMasterAddress.equalsIgnoreCase("N/A")) {
  InetSocketAddress addr = new InetSocketAddress(appMasterAddress, appMasterPort);
  appMessageHandler = RPC.getProxy(ApplicationMessageProtocol.class, ApplicationMessageProtocol.versionID, addr, conf);
 }
 return appMessageHandler;
}

Code example source: org.apache.hadoop/hadoop-common

public HAServiceProtocolClientSideTranslatorPB(
  InetSocketAddress addr, Configuration conf,
  SocketFactory socketFactory, int timeout) throws IOException {
 RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
   RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
   UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}

Code example source: org.apache.hadoop/hadoop-common

public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
  Configuration conf) throws IOException {
 RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
   RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Get a protocol proxy that contains a proxy connection to a remote server
 * and a set of methods that are supported by the server
 * 
 * @param protocol protocol class
 * @param clientVersion client version
 * @param addr remote address
 * @param ticket user group information
 * @param conf configuration to use
 * @param factory socket factory
 * @return the protocol proxy
 * @throws IOException if the far end threw a RemoteException
 */
public static <T> ProtocolProxy<T> getProtocolProxy(Class<T> protocol,
               long clientVersion,
               InetSocketAddress addr,
               UserGroupInformation ticket,
               Configuration conf,
               SocketFactory factory) throws IOException {
 return getProtocolProxy(protocol, clientVersion, addr, ticket, conf,
   factory, getRpcTimeout(conf), null);
}
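
A hedged usage sketch of the method above, assuming a protobuf protocol class MyProtocolPB (a hypothetical name) and a reachable server; unlike getProxy, getProtocolProxy returns a ProtocolProxy wrapper that can also report whether the server supports a given method:

// Minimal sketch; the address and protocol name are illustrative.
ProtocolProxy<MyProtocolPB> protocolProxy = RPC.getProtocolProxy(
  MyProtocolPB.class,
  RPC.getProtocolVersion(MyProtocolPB.class),
  NetUtils.createSocketAddr("nn.example.com:8020"),
  UserGroupInformation.getCurrentUser(),
  conf,
  NetUtils.getDefaultSocketFactory(conf));
MyProtocolPB proxy = protocolProxy.getProxy(); // unwrap the actual proxy
boolean supported = protocolProxy.isMethodSupported("someMethod");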

Code example source: org.apache.hadoop/hadoop-hdfs

// Reconstructed excerpt from the NameNodeRpcServer constructor; the
// original snippet is non-contiguous, so elided parts are marked below.
int handlerCount = conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
    DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
    ProtobufRpcEngine.class);
// ... BlockingService clientNNPbService = ...
//       newReflectiveBlockingService(clientProtocolServerTranslator); ...
int maxDataLength = conf.getInt(IPC_MAXIMUM_DATA_LENGTH,
    IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
// ... DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator = ...; ...
String bindHost = nn.getServiceRpcServerBindHost(conf);
if (bindHost == null) {
  bindHost = serviceRpcAddr.getHostName();
}
int serviceHandlerCount = conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
serviceRpcServer = new RPC.Builder(conf)
    // ... protocol, instance, bindHost, serviceRpcAddr.getPort(), and
    //     serviceHandlerCount are applied here ...
    .build();
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
    ProtobufRpcEngine.class);
// ... String bindHost = nn.getLifelineRpcServerBindHost(conf); ...

Code example source: ch.cern.hadoop/hadoop-hdfs

// Excerpt from a block-token RPC test; the server setup and the
// assertion block are truncated in the original snippet.
Configuration conf = new Configuration();
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
// A proxy pointed at an unreachable address ("1.1.1.1:1"); it is never
// used, but must still be released with RPC.stopProxy().
ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
  ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
  new InetSocketAddress("1.1.1.1", 1),
  UserGroupInformation.createRemoteUser("junk"), conf,
  NetUtils.getDefaultSocketFactory(conf));
// ... a working proxy is created and exercised:
//     assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
//     if (proxy != null) { RPC.stopProxy(proxy); }
//     server.stop(); ...
RPC.stopProxy(proxyToNoWhere);

Code example source: org.apache.hadoop/hadoop-common

ZKFCRpcServer(Configuration conf,
  InetSocketAddress bindAddr,
  ZKFailoverController zkfc,
  PolicyProvider policy) throws IOException {
 this.zkfc = zkfc;
 
 RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
   ProtobufRpcEngine.class);
 ZKFCProtocolServerSideTranslatorPB translator =
   new ZKFCProtocolServerSideTranslatorPB(this);
 BlockingService service = ZKFCProtocolService
   .newReflectiveBlockingService(translator);
 this.server = new RPC.Builder(conf).setProtocol(ZKFCProtocolPB.class)
   .setInstance(service).setBindAddress(bindAddr.getHostName())
   .setPort(bindAddr.getPort()).setNumHandlers(HANDLER_COUNT)
   .setVerbose(false).build();
 
 // set service-level authorization security policy
 if (conf.getBoolean(
   CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
  server.refreshServiceAcl(conf, policy);
 }
}
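
ZKFCRpcServer above uses the protobuf engine with a server-side translator. With the legacy Writable engine the same builder works directly against the protocol interface; a minimal hedged sketch, reusing the hypothetical EchoProtocol from earlier (EchoProtocolImpl is an assumed implementation of that interface, not part of any Hadoop module):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;

public class EchoServer {
 public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  RPC.Server server = new RPC.Builder(conf)
    .setProtocol(EchoProtocol.class)
    .setInstance(new EchoProtocolImpl()) // must implement EchoProtocol
    .setBindAddress("0.0.0.0")
    .setPort(50030)
    .setNumHandlers(4)  // number of RPC handler threads
    .setVerbose(false)
    .build();
  server.start(); // returns immediately; handler threads serve requests
 }
}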

Code example source: apache/hive

// Excerpt from a Hive RPC-server factory method; the enclosing method
// signature, the ACL loop, and the trailing builder calls are truncated
// in the original snippet.
Configuration serverConf = conf;
boolean isSecurityEnabled = conf.getBoolean(
  CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false);
if (isSecurityEnabled) {
 // For each ACL setting (loop elided):
 //   if (conf.get(acl.varname) != null) continue; // some value is set
 //   if (serverConf == conf) serverConf = new Configuration(conf);
 //   serverConf.set(acl.varname, HiveConf.getVar(serverConf, acl)); // set the default
}
RPC.setProtocolEngine(serverConf, pbProtocol, ProtobufRpcEngine.class);
RPC.Builder builder = new RPC.Builder(serverConf)
  .setProtocol(pbProtocol)
  .setInstance(blockingService)
  .setBindAddress(addr.getHostName())
  .setPort(addr.getPort())
  .setNumHandlers(numHandlers);
if (secretManager != null) {
 // ... the secret manager is attached before building the server ...
}
Code example source: org.apache.hadoop/hadoop-common-test

@Test
public void testPerConnectionConf() throws Exception {
 TestTokenSecretManager sm = new TestTokenSecretManager();
 final Server server = RPC.getServer(TestSaslProtocol.class,
   new TestSaslImpl(), ADDRESS, 0, 5, true, conf, sm);
 server.start();
 final UserGroupInformation current = UserGroupInformation.getCurrentUser();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
   .getUserName()));
 Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
   sm);
 Text host = new Text(addr.getAddress().getHostAddress() + ":"
   + addr.getPort());
 token.setService(host);
 LOG.info("Service IP address for token is " + host);
 current.addToken(token);
 TestSaslProtocol proxy3 = null;
 try {
  // newConf, proxy1, proxy2, and the connection-id set "conns" are
  // declared in the full test; this snippet is truncated.
  proxy1 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  Client client = WritableRpcEngine.getClient(conf);
  proxy2 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  assertEquals("number of connections in cache is wrong", 1, conns.size());
  proxy3 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  // ... remaining assertions and proxy cleanup elided ...

Code example source: com.github.jiayuhan-it/hadoop-common

private InetSocketAddress startAndGetRPCServerAddress(InetSocketAddress serverAddress) {
 Configuration conf = new Configuration();
 try {
  RPC.setProtocolEngine(conf,
    HAServiceProtocolPB.class, ProtobufRpcEngine.class);
  HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
    new HAServiceProtocolServerSideTranslatorPB(new MockHAProtocolImpl());
  BlockingService haPbService = HAServiceProtocolService
    .newReflectiveBlockingService(haServiceProtocolXlator);
  Server server = new RPC.Builder(conf)
    .setProtocol(HAServiceProtocolPB.class)
    .setInstance(haPbService)
    .setBindAddress(serverAddress.getHostName())
    .setPort(serverAddress.getPort()).build();
  server.start();
  return NetUtils.getConnectAddress(server);
 } catch (IOException e) {
  return null;
 }
}

Code example source: org.apache.hadoop/hadoop-common-test

static void testKerberosRpc(String principal, String keytab) throws Exception {
 final Configuration newConf = new Configuration(conf);
 newConf.set(SERVER_PRINCIPAL_KEY, principal);
 newConf.set(SERVER_KEYTAB_KEY, keytab);
 SecurityUtil.login(newConf, SERVER_KEYTAB_KEY, SERVER_PRINCIPAL_KEY);
 TestUserGroupInformation.verifyLoginMetrics(1, 0);
 UserGroupInformation current = UserGroupInformation.getCurrentUser();
 System.out.println("UGI: " + current);
 Server server = RPC.getServer(TestSaslProtocol.class, new TestSaslImpl(),
   ADDRESS, 0, 5, true, newConf, null);
 TestSaslProtocol proxy = null;
 server.start();
 InetSocketAddress addr = NetUtils.getConnectAddress(server);
 try {
  proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, newConf);
  proxy.ping();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
 System.out.println("Test is successful.");
}

Code example source: org.apache.hadoop/hadoop-hdfs

JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
 this.jn = jn;
 Configuration confCopy = new Configuration(conf);
 confCopy.setBoolean(
   CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
   true);
 // ... addr is resolved from the configured JournalNode RPC address ...
 String bindHost = conf.getTrimmed(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, null);
 if (bindHost == null) {
  bindHost = addr.getHostName();
 }
 LOG.info("RPC server is binding to " + bindHost + ":" + addr.getPort());
 RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
   ProtobufRpcEngine.class);
 // ... QJournalProtocolServerSideTranslatorPB translator and the
 //     BlockingService "service" are constructed here ...
 this.server = new RPC.Builder(confCopy)
   .setProtocol(QJournalProtocolPB.class)
   .setInstance(service)
   .setBindAddress(bindHost)
   .setPort(addr.getPort())
   .setNumHandlers(HANDLER_COUNT)
   .setVerbose(false)
   .build();
}

Code example source: org.apache.hadoop/hadoop-common-test

private void doDigestRpc(Server server, TestTokenSecretManager sm)
  throws Exception {
 server.start();
 final UserGroupInformation current = UserGroupInformation.getCurrentUser();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
   .getUserName()));
 Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
   sm);
 Text host = new Text(addr.getAddress().getHostAddress() + ":"
   + addr.getPort());
 token.setService(host);
 LOG.info("Service IP address for token is " + host);
 current.addToken(token);
 TestSaslProtocol proxy = null;
 try {
  proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
    TestSaslProtocol.versionID, addr, conf);
  //QOP must be auth
  Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
  proxy.ping();
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}

Code example source: org.apache.hadoop/hadoop-common

// Excerpt from the TraceAdmin command-line tool; the surrounding method
// and the trailing try block are truncated in the original snippet.
if (servicePrincipal != null) {
 LOG.debug("Set service principal: {}", servicePrincipal);
 getConf().set(
   CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
   servicePrincipal);
}
RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
  ProtobufRpcEngine.class);
InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
Class<?> xface = TraceAdminProtocolPB.class;
proxy = (TraceAdminProtocolPB) RPC.getProxy(xface,
  RPC.getProtocolVersion(xface), address,
  ugi, getConf(), NetUtils.getDefaultSocketFactory(getConf()), 0);
remote = new TraceAdminProtocolTranslatorPB(proxy);
// ... the RPC call itself follows in a try block ...

Code example source: org.apache.hadoop/hadoop-hdfs

private void initIpcServer() throws IOException {
 InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
   getConf().getTrimmed(DFS_DATANODE_IPC_ADDRESS_KEY));
 RPC.setProtocolEngine(getConf(), ClientDatanodeProtocolPB.class,
   ProtobufRpcEngine.class);
 // ... ClientDatanodeProtocolServerSideTranslatorPB and the
 //     BlockingService "service" are constructed here ...
 ipcServer = new RPC.Builder(getConf())
   .setProtocol(ClientDatanodeProtocolPB.class)
   .setInstance(service)
   .setBindAddress(ipcAddr.getHostName())
   .setPort(ipcAddr.getPort())
   .setNumHandlers(
     getConf().getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
       DFS_DATANODE_HANDLER_COUNT_DEFAULT)).setVerbose(false)
   .setSecretManager(blockPoolTokenSecretManager).build();
 if (getConf().getBoolean(
   CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
  ipcServer.refreshServiceAcl(getConf(), new HDFSPolicyProvider());
 }
}

Code example source: org.apache.hadoop/hadoop-yarn-common

private Server createServer(Class<?> pbProtocol, InetSocketAddress addr, Configuration conf, 
   SecretManager<? extends TokenIdentifier> secretManager, int numHandlers, 
   BlockingService blockingService, String portRangeConfig) throws IOException {
  RPC.setProtocolEngine(conf, pbProtocol, ProtobufRpcEngine.class);
  RPC.Server server = new RPC.Builder(conf).setProtocol(pbProtocol)
    .setInstance(blockingService).setBindAddress(addr.getHostName())
    .setPort(addr.getPort()).setNumHandlers(numHandlers).setVerbose(false)
    .setSecretManager(secretManager).setPortRangeConfig(portRangeConfig)
    .build();
  LOG.info("Adding protocol "+pbProtocol.getCanonicalName()+" to the server");
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, pbProtocol, blockingService);
  return server;
 }
}

Code example source: org.apache.hadoop/hadoop-common-test

public MiniProtocol run() throws IOException {
  MiniProtocol p = (MiniProtocol) RPC.getProxy(MiniProtocol.class,
    MiniProtocol.versionID, addr, conf);
  Token<TestDelegationTokenIdentifier> token;
  token = p.getDelegationToken(new Text(RENEWER));
  currentUgi = UserGroupInformation.createUserForTesting(MINI_USER, 
    GROUP_NAMES);
  token.setService(new Text(addr.getAddress().getHostAddress() 
    + ":" + addr.getPort()));
  currentUgi.addToken(token);
  return p;
 }
});

Code example source: org.apache.hadoop/hadoop-mapred-test

/**
  * Test {@link AuditLogger} with IP set.
  */
 public void testAuditLoggerWithIP() throws Exception {
  Configuration conf = new Configuration();
  // start the IPC server
  Server server = RPC.getServer(new MyTestRPCServer(), "0.0.0.0", 0, conf);
  server.start();

  InetSocketAddress addr = NetUtils.getConnectAddress(server);

  // Make a client connection and test the audit log
  TestProtocol proxy = (TestProtocol)RPC.getProxy(TestProtocol.class,
              TestProtocol.versionID, addr, conf);
  // Start the testcase
  proxy.ping();

  server.stop();
 }
}

Code example source: ch.cern.hadoop/hadoop-hdfs

@Test
public void testBlockTokenRpc() throws Exception {
 Configuration conf = new Configuration();
 conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
 UserGroupInformation.setConfiguration(conf);
 
 BlockTokenSecretManager sm = new BlockTokenSecretManager(
   blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
 Token<BlockTokenIdentifier> token = sm.generateToken(block3,
   EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
 final Server server = createMockDatanode(sm, token, conf);
 server.start();
 final InetSocketAddress addr = NetUtils.getConnectAddress(server);
 final UserGroupInformation ticket = UserGroupInformation
   .createRemoteUser(block3.toString());
 ticket.addToken(token);
 ClientDatanodeProtocol proxy = null;
 try {
  proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
    NetUtils.getDefaultSocketFactory(conf));
  assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
 } finally {
  server.stop();
  if (proxy != null) {
   RPC.stopProxy(proxy);
  }
 }
}
