Usage and code examples of the org.apache.hadoop.ipc.RPC.getProxy() method

Reposted by x33g5p2x on 2022-01-28

This article collects Java code examples of the org.apache.hadoop.ipc.RPC.getProxy method and shows how RPC.getProxy is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the RPC.getProxy method:
Package: org.apache.hadoop.ipc.RPC
Class: RPC
Method: getProxy

Introduction to RPC.getProxy

Construct a client-side proxy object with the default SocketFactory.
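Before the excerpts, here is a minimal sketch of the typical calling pattern, assuming a matching RPC.Server exporting the protocol is already listening; MyProtocol, its versionID, and the localhost:8020 address are invented purely for illustration. The simplest overload takes the protocol interface, its version, the server address, and a Configuration, and connects with the default SocketFactory; richer overloads, shown in the excerpts below, additionally accept a UserGroupInformation, an explicit SocketFactory, and an RPC timeout. Several excerpts invoke getProxy inside UserGroupInformation#doAs so that the connection is established as a specific user, which is why those snippets end with a dangling });.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

public class RpcGetProxyExample {

  // Hypothetical protocol interface, shown only for illustration. Real protocols
  // (e.g. LlapTaskUmbilicalProtocol or ApplicationMessageProtocol in the excerpts
  // below) follow the same shape: a versionID constant plus the remote methods.
  public interface MyProtocol extends VersionedProtocol {
    long versionID = 1L;
    String echo(String message) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumed address of an RPC.Server that exports MyProtocol.
    InetSocketAddress addr = new InetSocketAddress("localhost", 8020);

    // Simplest overload: protocol interface, protocol version, server address,
    // configuration. The connection uses the default SocketFactory.
    MyProtocol proxy = RPC.getProxy(MyProtocol.class, MyProtocol.versionID, addr, conf);
    try {
      System.out.println(proxy.echo("ping"));
    } finally {
      // The proxy holds an open connection; release it when no longer needed.
      RPC.stopProxy(proxy);
    }
  }
}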

Code examples

Code example source: apache/hive

@Override
 public LlapTaskUmbilicalProtocol run() throws Exception {
  return RPC.getProxy(LlapTaskUmbilicalProtocol.class,
    LlapTaskUmbilicalProtocol.versionID, address, taskOwner, conf, socketFactory);
 }
});

Code example source: apache/hive

@Override
 public LlapTaskUmbilicalProtocol run() throws Exception {
  return RPC
   .getProxy(LlapTaskUmbilicalProtocol.class, LlapTaskUmbilicalProtocol.versionID,
    address, UserGroupInformation.getCurrentUser(), amNodeInfo.conf,
    amNodeInfo.socketFactory, (int) (amNodeInfo.timeout));
 }
});

Code example source: Qihoo360/XLearning

private static ApplicationMessageProtocol getAppMessageHandler(
  YarnConfiguration conf, String appMasterAddress, int appMasterPort) throws IOException {
 ApplicationMessageProtocol appMessageHandler = null;
 if (!StringUtils.isBlank(appMasterAddress) && !appMasterAddress.equalsIgnoreCase("N/A")) {
  InetSocketAddress addr = new InetSocketAddress(appMasterAddress, appMasterPort);
  appMessageHandler = RPC.getProxy(ApplicationMessageProtocol.class, ApplicationMessageProtocol.versionID, addr, conf);
 }
 return appMessageHandler;
}

Code example source: org.apache.hadoop/hadoop-common

public HAServiceProtocolClientSideTranslatorPB(
  InetSocketAddress addr, Configuration conf,
  SocketFactory socketFactory, int timeout) throws IOException {
 RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
   RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
   UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}

Code example source: org.apache.hadoop/hadoop-common

public ZKFCProtocolClientSideTranslatorPB(
  InetSocketAddress addr, Configuration conf,
  SocketFactory socketFactory, int timeout) throws IOException {
 RPC.setProtocolEngine(conf, ZKFCProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(ZKFCProtocolPB.class,
   RPC.getProtocolVersion(ZKFCProtocolPB.class), addr,
   UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
}

Code example source: org.apache.hadoop/hadoop-common

public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
  Configuration conf) throws IOException {
 RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
   RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}

Code example source: Qihoo360/XLearning

private void init() {
 LOG.info("XLearningContainer initializing");
 String appMasterHost = System.getenv(XLearningConstants.Environment.APPMASTER_HOST.toString());
 int appMasterPort = Integer.valueOf(System.getenv(XLearningConstants.Environment.APPMASTER_PORT.toString()));
 InetSocketAddress addr = new InetSocketAddress(appMasterHost, appMasterPort);
 try {
  this.amClient = RPC.getProxy(ApplicationContainerProtocol.class,
    ApplicationContainerProtocol.versionID, addr, conf);
 } catch (IOException e) {
  LOG.error("Connecting to ApplicationMaster " + appMasterHost + ":" + appMasterPort + " failed!");
  LOG.error("Container will suicide!");
  System.exit(1);
 }
 heartbeatThread = new Heartbeat(amClient, conf, containerId, outputIndex, index, role);
 heartbeatThread.setDaemon(true);
 heartbeatThread.start();
 heartbeatThread.setContainerStatus(XLearningContainerStatus.INITIALIZING);
 containerReporter = null;
 if ((("TENSORFLOW".equals(xlearningAppType) || "LIGHTLDA".equals(xlearningAppType)) && !single) || xlearningAppType.equals("DISTLIGHTGBM")) {
  try {
   Utilities.getReservePort(reservedSocket, InetAddress.getByName(localHost).getHostAddress(), reservePortBegin, reservePortEnd);
  } catch (IOException e) {
   LOG.error("Can not get available port");
   reportFailedAndExit();
  }
 }
}

Code example source: org.apache.hadoop/hadoop-common

/**
 * Get a client of the {@link GetUserMappingsProtocol}.
 * @return A {@link GetUserMappingsProtocol} client proxy.
 * @throws IOException
 */
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
 GetUserMappingsProtocol userGroupMappingProtocol =
  RPC.getProxy(GetUserMappingsProtocol.class, 
    GetUserMappingsProtocol.versionID,
    getProtocolAddress(getConf()), UserGroupInformation.getCurrentUser(),
    getConf(), NetUtils.getSocketFactory(getConf(),
      GetUserMappingsProtocol.class));
 return userGroupMappingProtocol;
}

Code example source: org.apache.hadoop/hadoop-common

UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
Class<?> xface = TraceAdminProtocolPB.class;
proxy = (TraceAdminProtocolPB)RPC.getProxy(xface,
  RPC.getProtocolVersion(xface), address,
  ugi, getConf(), NetUtils.getDefaultSocketFactory(getConf()), 0);

Code example source: org.apache.hadoop/hadoop-hdfs

public InterDatanodeProtocolTranslatorPB(InetSocketAddress addr,
  UserGroupInformation ugi, Configuration conf, SocketFactory factory,
  int socketTimeout)
  throws IOException {
 RPC.setProtocolEngine(conf, InterDatanodeProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(InterDatanodeProtocolPB.class,
   RPC.getProtocolVersion(InterDatanodeProtocolPB.class), addr, ugi, conf,
   factory, socketTimeout);
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static DatanodeProtocolPB createNamenode(
  InetSocketAddress nameNodeAddr, Configuration conf,
  UserGroupInformation ugi) throws IOException {
 return RPC.getProxy(DatanodeProtocolPB.class,
   RPC.getProtocolVersion(DatanodeProtocolPB.class), nameNodeAddr, ugi,
   conf, NetUtils.getSocketFactory(conf, DatanodeProtocolPB.class));
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static DatanodeLifelineProtocolPB createNamenode(
  InetSocketAddress nameNodeAddr, Configuration conf,
  UserGroupInformation ugi) throws IOException {
 return RPC.getProxy(DatanodeLifelineProtocolPB.class,
   RPC.getProtocolVersion(DatanodeLifelineProtocolPB.class), nameNodeAddr,
   ugi, conf,
   NetUtils.getSocketFactory(conf, DatanodeLifelineProtocolPB.class));
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
 public InterQJournalProtocol run() throws IOException {
  RPC.setProtocolEngine(confCopy, InterQJournalProtocolPB.class,
    ProtobufRpcEngine.class);
  InterQJournalProtocolPB interQJournalProtocolPB = RPC.getProxy(
    InterQJournalProtocolPB.class,
    RPC.getProtocolVersion(InterQJournalProtocolPB.class),
    jnAddr, confCopy);
  return new InterQJournalProtocolTranslatorPB(
    interQJournalProtocolPB);
 }
});

Code example source: org.apache.hadoop/hadoop-hdfs

private static Object createNameNodeProxy(InetSocketAddress address,
  Configuration conf, UserGroupInformation ugi, Class<?> xface,
  int rpcTimeout) throws IOException {
 RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
 Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
   ugi, conf, NetUtils.getDefaultSocketFactory(conf), rpcTimeout);
 return proxy;
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override
 public QJournalProtocol run() throws IOException {
  RPC.setProtocolEngine(confCopy,
    QJournalProtocolPB.class, ProtobufRpcEngine.class);
  QJournalProtocolPB pbproxy = RPC.getProxy(
    QJournalProtocolPB.class,
    RPC.getProtocolVersion(QJournalProtocolPB.class),
    addr, confCopy);
  return new QJournalProtocolTranslatorPB(pbproxy);
 }
});

Code example source: org.apache.hadoop/hadoop-hdfs

RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
 ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0);

Code example source: org.apache.hadoop/hadoop-yarn-common

public ClientSCMProtocolPBClientImpl(long clientVersion,
  InetSocketAddress addr, Configuration conf) throws IOException {
 RPC.setProtocolEngine(conf, ClientSCMProtocolPB.class,
  ProtobufRpcEngine.class);
 proxy = RPC.getProxy(ClientSCMProtocolPB.class, clientVersion, addr, conf);
}

Code example source: ch.cern.hadoop/hadoop-common

@Override
 public MiniProtocol run() throws IOException {
  return RPC.getProxy(MiniProtocol.class,
    MiniProtocol.versionID, addr, conf);
 }
});

Code example source: ch.cern.hadoop/hadoop-common

public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
  Configuration conf) throws IOException {
 RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
   ProtobufRpcEngine.class);
 rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
   RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
}

Code example source: ch.cern.hadoop/hadoop-common

@Override
 public String run() throws IOException {
  proxy = RPC.getProxy(TestProtocol.class,
    TestProtocol.versionID, addr, conf);
  String ret = proxy.aMethod();
  return ret;
 }
});
