org.apache.hadoop.ipc.RPC.waitForProxy()方法的使用及代码示例

x33g5p2x  于2022-01-28 转载在 其他  
字(6.7k)|赞(0)|评价(0)|浏览(150)

本文整理了Java中org.apache.hadoop.ipc.RPC.waitForProxy方法的一些代码示例,展示了RPC.waitForProxy的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。RPC.waitForProxy方法的具体详情如下:
包路径:org.apache.hadoop.ipc.RPC
类名称:RPC
方法名:waitForProxy

RPC.waitForProxy介绍

[英]Get a proxy connection to a remote server
[中]获取到远程服务器的代理连接

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-hdfs

/**
 * Returns a proxy to the active NameNode, creating and caching one on first
 * use. Cycles through the known NameNodes until a connection succeeds or the
 * retry budget is exhausted.
 *
 * @return the cached proxy, or {@code null} once every NameNode has been
 *         tried {@code maxRetries} times without success
 * @throws IOException on a non-retriable failure while building the proxy
 */
private NamenodeProtocol getActiveNodeProxy() throws IOException {
  while (cachedActiveProxy == null) {
    // Give up once each NameNode has been attempted maxRetries times.
    if ((nnLoopCount / nnCount) >= maxRetries) {
      return null;
    }
    currentNN = nnLookup.next();
    try {
      final int rpcTimeout = conf.getInt(
          DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
          DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
      // Long.MAX_VALUE: wait effectively forever for the connection itself;
      // individual RPC calls are still bounded by rpcTimeout.
      NamenodeProtocolPB rawProxy = RPC.waitForProxy(NamenodeProtocolPB.class,
          RPC.getProtocolVersion(NamenodeProtocolPB.class),
          currentNN.getIpcAddress(), conf, rpcTimeout, Long.MAX_VALUE);
      cachedActiveProxy = new NamenodeProtocolTranslatorPB(rawProxy);
    } catch (IOException e) {
      LOG.info("Failed to reach " + currentNN, e);
      // Couldn't even reach this NN; advance the budget and try the next one.
      nnLoopCount++;
    }
  }
  return cachedActiveProxy;
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-mapred

/**
 * Connects to the JobTracker at {@code jobTrackAddr}, blocking until the
 * RPC proxy is available.
 *
 * @return the {@link InterTrackerProtocol} proxy
 * @throws IOException if the proxy cannot be created
 */
public Object run() throws IOException {
  Object trackerProxy = RPC.waitForProxy(
      InterTrackerProtocol.class, InterTrackerProtocol.versionID,
      jobTrackAddr, fConf);
  return trackerProxy;
 }
});

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

/**
 * Gets a proxy connection to a remote server, waiting indefinitely for the
 * server to become reachable.
 *
 * @param protocol the protocol interface the proxy implements
 * @param clientVersion the version of the protocol the client expects
 * @param addr the remote server address
 * @param conf configuration used to build the connection
 * @return a {@link VersionedProtocol} proxy for the remote server
 * @throws IOException if the proxy cannot be created
 */
public static VersionedProtocol waitForProxy(
    Class protocol, long clientVersion, InetSocketAddress addr,
    Configuration conf) throws IOException {
  // No caller-supplied deadline: delegate with an effectively infinite one.
  return waitForProxy(protocol, clientVersion, addr, conf, Long.MAX_VALUE);
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Resolves the JobTracker address from the configuration and blocks until
 * an RPC proxy to it is established, storing both on this instance.
 *
 * @param conf job configuration holding the JobTracker address
 * @throws IOException if the proxy cannot be created
 */
protected void initJobClient(JobConf conf) throws IOException {
  this.jobTrackAddr = JobTracker.getAddress(conf);
  InterTrackerProtocol trackerProxy = (InterTrackerProtocol) RPC.waitForProxy(
      InterTrackerProtocol.class, InterTrackerProtocol.versionID,
      jobTrackAddr, conf);
  this.jobClient = trackerProxy;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common-test

/**
 * Verifies that waitForProxy against an address with no server fails with
 * ConnectException instead of succeeding or hanging past the 15s limit.
 */
public void testStandaloneClient() throws IOException {
 try {
  RPC.waitForProxy(TestProtocol.class, TestProtocol.versionID,
    new InetSocketAddress(ADDRESS, 20), conf, 15000L);
  fail("We should not have reached here");
 } catch (ConnectException expected) {
  // Expected: no server is listening on this port.
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Lazily builds and caches a proxy to the active NameNode at
 * {@code activeAddr}; subsequent calls reuse the cached instance.
 *
 * @return the cached {@link NamenodeProtocol} proxy
 * @throws IOException if the proxy cannot be created
 */
private NamenodeProtocol getActiveNodeProxy() throws IOException {
 if (cachedActiveProxy != null) {
  return cachedActiveProxy;
 }
 // Per-call RPC timeout from configuration; the connection wait itself is
 // effectively unbounded (Long.MAX_VALUE).
 final int timeoutMs = conf.getInt(
   DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
   DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
 NamenodeProtocolPB rawProxy = RPC.waitForProxy(NamenodeProtocolPB.class,
   RPC.getProtocolVersion(NamenodeProtocolPB.class), activeAddr, conf,
   timeoutMs, Long.MAX_VALUE);
 cachedActiveProxy = new NamenodeProtocolTranslatorPB(rawProxy);
 return cachedActiveProxy;
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * Returns the proxy to the active NameNode, constructing it once on first
 * call and reusing it afterwards.
 *
 * @return the cached {@link NamenodeProtocol} proxy
 * @throws IOException if the proxy cannot be created
 */
private NamenodeProtocol getActiveNodeProxy() throws IOException {
 if (cachedActiveProxy == null) {
  // RPC timeout is configurable; the connect wait has no practical limit.
  final int logRollRpcTimeout = conf.getInt(
    DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_KEY,
    DFSConfigKeys.DFS_HA_LOGROLL_RPC_TIMEOUT_DEFAULT);
  final NamenodeProtocolPB pbProxy = RPC.waitForProxy(
    NamenodeProtocolPB.class,
    RPC.getProtocolVersion(NamenodeProtocolPB.class),
    activeAddr, conf, logRollRpcTimeout, Long.MAX_VALUE);
  cachedActiveProxy = new NamenodeProtocolTranslatorPB(pbProxy);
 }
 assert cachedActiveProxy != null;
 return cachedActiveProxy;
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Builds a DirectTaskUmbilical that talks to the JobTracker directly,
 * blocking until the JobTracker proxy can be established.
 *
 * @param taskTracker the local task tracker umbilical to wrap
 * @param jobTrackerAddress address of the JobTracker to connect to
 * @param conf job configuration (supplies the connect timeout)
 * @return a new {@link DirectTaskUmbilical} bound to both endpoints
 * @throws IOException if the JobTracker proxy cannot be created
 */
public static DirectTaskUmbilical createDirectUmbilical(
 TaskUmbilicalProtocol taskTracker,
 InetSocketAddress jobTrackerAddress, JobConf conf) throws IOException {
 LOG.info("Creating direct umbilical to " + jobTrackerAddress.toString());
 // The configured connect timeout also serves as the per-call RPC timeout
 // (truncated to int for that parameter).
 long connectTimeoutMsec = conf.getLong(
  "corona.jobtracker.connect.timeout.msec", 60000L);
 InterTrackerProtocol jobClient = RPC.waitForProxy(
  InterTrackerProtocol.class,
  InterTrackerProtocol.versionID,
  jobTrackerAddress,
  conf,
  connectTimeoutMsec,
  (int) connectTimeoutMsec);
 return new DirectTaskUmbilical(taskTracker, jobClient);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Configures a bounded number of connect retries, signals that the attempt
 * has started, then tries to reach the (absent) test server. Any failure is
 * captured in {@code caught} for the main test thread to inspect.
 */
@Override
public void run() {
 try {
  Configuration retryConf = new Configuration(conf);
  retryConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, connectRetries);
  retryConf.setInt(
    IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
    connectRetries);
  waitStarted = true;
  TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
    TestProtocol.versionID,
    new InetSocketAddress(ADDRESS, 20),
    retryConf,
    15000L);
  proxy.echo("");
 } catch (Throwable t) {
  caught = t;  // surface the failure to the asserting thread
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Attempts an RPC to a server that is not running, using a configuration
 * limited to {@code connectRetries} connect attempts. Whatever goes wrong
 * is stored in {@code caught} rather than thrown.
 */
@Override
public void run() {
 try {
  Configuration limitedRetryConf = new Configuration(conf);
  limitedRetryConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
    connectRetries);
  limitedRetryConf.setInt(
    IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
    connectRetries);
  // Let the coordinating thread know the connection attempt is underway.
  waitStarted = true;
  TestProtocol testProxy = RPC.waitForProxy(TestProtocol.class,
    TestProtocol.versionID,
    new InetSocketAddress(ADDRESS, 20),
    limitedRetryConf,
    15000L);
  testProxy.echo("");
 } catch (Throwable throwable) {
  caught = throwable;
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Expects waitForProxy plus a follow-up call to fail with ConnectException
 * when nothing is listening on the target port.
 */
@Test
public void testStandaloneClient() throws IOException {
 try {
  TestProtocol orphanProxy = RPC.waitForProxy(TestProtocol.class,
    TestProtocol.versionID, new InetSocketAddress(ADDRESS, 20), conf,
    15000L);
  orphanProxy.echo("");
  fail("We should not have reached here");
 } catch (ConnectException expected) {
  // Expected: there is no server at ADDRESS:20.
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Connecting to a port with no server must raise ConnectException within
 * the 15-second proxy wait; reaching the fail() line is a test failure.
 */
@Test
public void testStandaloneClient() throws IOException {
 try {
  TestProtocol unreachable = RPC.waitForProxy(TestProtocol.class,
    TestProtocol.versionID, new InetSocketAddress(ADDRESS, 20), conf,
    15000L);
  unreachable.echo("");
  fail("We should not have reached here");
 } catch (ConnectException ioe) {
  // This is the outcome the test is asserting.
 }
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Initializes this namespace's connection to its NameNode: obtains an RPC
 * proxy, performs the registration handshake, sets up namespace storage,
 * and records the local IPC/info ports in the registration.
 * Order matters: the handshake must complete before storage setup, and
 * storage setup is serialized on the enclosing DataNode.
 *
 * @param conf configuration used to build the NameNode proxy
 * @param dataDirs data directories (NOTE(review): not used directly here —
 *                 presumably consumed by setupNSStorage(); confirm)
 * @throws IOException if the proxy or handshake fails
 */
void setupNS(Configuration conf, AbstractList<File> dataDirs) 
throws IOException {
 // get NN proxy — blocks until the NameNode at nnAddr is reachable
 DatanodeProtocol dnp = 
  (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
    DatanodeProtocol.versionID, nnAddr, conf);
 setNameNode(dnp);
 // handshake with NN — yields the namespace identity for this connection
 NamespaceInfo nsInfo = handshake();
 setNamespaceInfo(nsInfo);
 // Storage setup is guarded by the enclosing DataNode's monitor.
 synchronized(DataNode.this){
  setupNSStorage();
 }
 
 // Publish the actual ports this node bound, for the NN registration.
 nsRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());
 nsRegistration.setInfoPort(infoServer.getPort());
}

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

(NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
  NamenodeProtocol.versionID, nameNodeAddr, conf);

代码示例来源:origin: com.facebook.hadoop/hadoop-core

(NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
  NamenodeProtocol.versionID, nameNodeAddr, conf);

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

RPC.waitForProxy(DatanodeProtocol.class,
         DatanodeProtocol.versionID,
         nameNodeAddr,

代码示例来源:origin: io.fabric8/fabric-hadoop

RPC.waitForProxy(DatanodeProtocol.class,
         DatanodeProtocol.versionID,
         nameNodeAddr,

代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core

RPC.waitForProxy(InterTrackerProtocol.class,
         InterTrackerProtocol.versionID, 
         jobTrackAddr, this.fConf);

相关文章