org.apache.hadoop.hbase.zookeeper.ZKUtil: usage and code examples


This article collects Java code examples for org.apache.hadoop.hbase.zookeeper.ZKUtil and shows how the class is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the ZKUtil class:
Package: org.apache.hadoop.hbase.zookeeper
Class name: ZKUtil

About ZKUtil

Internal HBase utility class for ZooKeeper.

Contains only static methods and constants.

Methods all throw KeeperException if there is an unexpected zookeeper exception, so callers of these methods must handle appropriately. If ZK is required for the operation, the server will need to be aborted.
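As a primer before the collected examples, the following is a minimal sketch of the call-and-handle pattern that Javadoc describes: every ZKUtil call can throw KeeperException, and when ZooKeeper is essential to the operation the caller aborts instead of continuing. The znode path and the surrounding class are hypothetical, and the sketch assumes the HBase 2.x ZKWatcher/ZKUtil/Abortable APIs.

import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

public class ZKUtilPrimer {
 // Hypothetical znode path, used only for illustration
 private static final String DEMO_ZNODE = "/hbase/demo/state";

 public static void writeState(ZKWatcher watcher, Abortable abortable, byte[] state) {
  try {
   // Create the node (and any missing parents) if it does not exist yet
   if (ZKUtil.checkExists(watcher, DEMO_ZNODE) < 0) {
    ZKUtil.createWithParents(watcher, DEMO_ZNODE);
   }
   ZKUtil.setData(watcher, DEMO_ZNODE, state);
  } catch (KeeperException ke) {
   // ZooKeeper is required for this operation, so give up rather than limp along
   abortable.abort("Unexpected ZooKeeper exception writing " + DEMO_ZNODE, ke);
  }
 }
}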

Code examples

Code example source: apache/hbase

public void start() {
 try {
  watcher.registerListener(this);
  String parent = ZKUtil.getParent(leaderZNode);
  if (ZKUtil.checkExists(watcher, parent) < 0) {
   ZKUtil.createWithParents(watcher, parent);
  }
 } catch (KeeperException ke) {
  watcher.abort("Unhandled zk exception when starting", ke);
  candidate.stop("Unhandled zk exception starting up: "+ke.getMessage());
 }
}

Code example source: apache/hbase

/**
 * Create a znode with data
 */
@Test
public void testCreateWithParents() throws KeeperException, InterruptedException {
 byte[] expectedData = new byte[] { 1, 2, 3 };
 ZKUtil.createWithParents(ZKW, "/l1/l2/l3/l4/testCreateWithParents", expectedData);
 byte[] data = ZKUtil.getData(ZKW, "/l1/l2/l3/l4/testCreateWithParents");
 assertTrue(Bytes.equals(expectedData, data));
 ZKUtil.deleteNodeRecursively(ZKW, "/l1");
 ZKUtil.createWithParents(ZKW, "/testCreateWithParents", expectedData);
 data = ZKUtil.getData(ZKW, "/testCreateWithParents");
 assertTrue(Bytes.equals(expectedData, data));
 ZKUtil.deleteNodeRecursively(ZKW, "/testCreateWithParents");
}

Code example source: apache/hbase

/**
 * Get the znodes corresponding to the meta replicas from ZK
 * @return list of znodes
 * @throws KeeperException if a ZooKeeper operation fails
 */
public List<String> getMetaReplicaNodes() throws KeeperException {
 List<String> childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode);
 List<String> metaReplicaNodes = new ArrayList<>(2);
 if (childrenOfBaseNode != null) {
  String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server");
  for (String child : childrenOfBaseNode) {
   if (child.startsWith(pattern)) {
    metaReplicaNodes.add(child);
   }
  }
 }
 return metaReplicaNodes;
}

Code example source: apache/hbase

private void createBaseZNodes() throws ZooKeeperConnectionException {
 try {
  // Create all the necessary "directories" of znodes
  ZKUtil.createWithParents(this, znodePaths.baseZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.rsZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.drainingZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.tableZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.splitLogZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.backupMasterAddressesZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.tableLockZNode);
  ZKUtil.createAndFailSilent(this, znodePaths.masterMaintZNode);
 } catch (KeeperException e) {
  throw new ZooKeeperConnectionException(
    prefix("Unexpected KeeperException creating base node"), e);
 }
}

Code example source: apache/hbase

private static void processSequentially(ZKWatcher zkw, List<ZKUtilOp> ops)
  throws KeeperException, NoNodeException {
 for (ZKUtilOp op : ops) {
  if (op instanceof CreateAndFailSilent) {
   createAndFailSilent(zkw, (CreateAndFailSilent) op);
  } else if (op instanceof DeleteNodeFailSilent) {
   deleteNodeFailSilent(zkw, (DeleteNodeFailSilent) op);
  } else if (op instanceof SetData) {
   setData(zkw, (SetData) op);
  } else {
   throw new UnsupportedOperationException("Unexpected ZKUtilOp type: "
     + op.getClass().getName());
  }
 }
}
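
processSequentially above is the fallback path used by ZKUtil.multiOrSequential, which first tries to apply the whole batch as one ZooKeeper multi-transaction and, when runSequentialOnMultiFailure is set, falls back to applying the operations one by one. A minimal usage sketch follows; the znode paths are hypothetical, and it assumes the ZKUtilOp factory methods and the multiOrSequential signature from apache/hbase 2.x.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

public class MultiOpExample {
 public static void applyBatch(ZKWatcher zkw) throws KeeperException {
  // Hypothetical znode paths, for illustration only
  List<ZKUtilOp> ops = Arrays.asList(
   ZKUtilOp.createAndFailSilent("/hbase/demo/new-node", Bytes.toBytes("v1")),
   ZKUtilOp.setData("/hbase/demo/existing-node", Bytes.toBytes("v2")),
   ZKUtilOp.deleteNodeFailSilent("/hbase/demo/old-node"));
  // Try one atomic multi; on failure, fall back to the sequential path shown above
  ZKUtil.multiOrSequential(zkw, ops, true);
 }
}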

Code example source: apache/hbase

public void start() throws KeeperException {
 watcher.registerListener(this);
 // make sure the base node exists
 ZKUtil.createWithParents(watcher, keysParentZNode);
 if (ZKUtil.watchAndCheckExists(watcher, keysParentZNode)) {
  List<ZKUtil.NodeAndData> nodes =
    ZKUtil.getChildDataAndWatchForNewChildren(watcher, keysParentZNode);
  refreshNodes(nodes);
 }
}

Code example source: apache/hbase

private static String initPeerClusterState(String baseZKNode)
  throws IOException, KeeperException {
 // Add a dummy region server and set up the cluster id
 Configuration testConf = new Configuration(conf);
 testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode);
 ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null);
 String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode,
     "hostname1.example.org:1234");
 ZKUtil.createWithParents(zkw1, fakeRs);
 ZKClusterId.setClusterId(zkw1, new ClusterId());
 return ZKConfig.getZooKeeperClusterKey(testConf);
}

Code example source: apache/hbase

@Before
public void setUp() throws Exception {
 Configuration conf = testUtil.getConfiguration();
 conf.set(HConstants.MASTER_PORT, "0");
 conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 2000);
 testUtil.startMiniZKCluster();
 ZKWatcher watcher = testUtil.getZooKeeperWatcher();
 ZKUtil.createWithParents(watcher, watcher.getZNodePaths().masterAddressZNode,
     Bytes.toBytes("fake:123"));
 master = new HMaster(conf);
 rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT);
}

Code example source: apache/hbase

Configuration conf = TEST_UTIL.getConfiguration();
ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
  HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
  conf.get("zookeeper.znode.metaserver", "meta-region-server"));
// Read the current location of the primary meta region from its znode
byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
ServerName currentServer = ProtobufUtil.toServerName(data);
Collection<ServerName> liveServers = TEST_UTIL.getAdmin()
  .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet();
// Pick any live server other than the one currently hosting meta
ServerName moveToServer = null;
for (ServerName s : liveServers) {
 if (!currentServer.equals(s)) {
  moveToServer = s;
  break;
 }
}
assert !moveToServer.equals(currentServer);
assertTrue(TEST_UTIL.getAdmin().tableExists(tableName));
LOG.info("CurrentServer=" + currentServer + ", moveToServer=" + moveToServer);
TEST_UTIL.getAdmin().move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
  Bytes.toBytes(moveToServer.getServerName()));
// Poll the znode until it reflects the new server (or the retry budget runs out)
final int max = 10000;
int i = 0;
do {
 Thread.sleep(10);
 data = ZKUtil.getData(zkw, primaryMetaZnode);
 currentServer = ProtobufUtil.toServerName(data);
 i++;
} while (!moveToServer.equals(currentServer) && i < max);

Code example source: XiaoMi/themis

TEST_UTIL.shutdownMiniCluster();
conf.set(ThemisRegionObserver.THEMIS_DELETE_THEMIS_DELETED_DATA_WHEN_COMPACT, "true");
TransactionTTL.timestampType = TimestampType.MS;
TransactionTestBase.startMiniCluster(conf);
initEnv();
ZooKeeperWatcher zk = new ZooKeeperWatcher(conf, "test", null, true);
HBaseAdmin admin = new HBaseAdmin(connection);
ZKUtil.createSetData(zk, ThemisMasterObserver.getThemisExpiredTsZNodePath(zk),
 Bytes.toBytes(String.valueOf(Long.MIN_VALUE)));
ZKUtil.createSetData(zk, ThemisMasterObserver.getThemisExpiredTsZNodePath(zk),
 Bytes.toBytes(String.valueOf(prewriteTs + 5)));
Assert.assertNull(result);
deleteOldDataAndUpdateTs();
conf.set(ThemisRegionObserver.THEMIS_DELETE_THEMIS_DELETED_DATA_WHEN_COMPACT, "false");
zk.close();

Code example source: apache/hbase

LOG.info("Found " + hostName + " on " + ni);
TEST_UTIL.getConfiguration().set(HRegionServer.MASTER_HOSTNAME_KEY, hostName);
TEST_UTIL.getConfiguration().set(HRegionServer.RS_HOSTNAME_KEY, hostName);
StartMiniClusterOption option = StartMiniClusterOption.builder()
  .numMasters(NUM_MASTERS).numRegionServers(NUM_RS).numDataNodes(NUM_RS).build();
TEST_UTIL.startMiniCluster(option);
try {
 ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
 List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode);

Code example source: co.cask.hbase/hbase

private static void getReplicationZnodesDump(ZooKeeperWatcher zkw, StringBuilder sb)
  throws KeeperException {
 String replicationZNodeName = zkw.getConfiguration().get("zookeeper.znode.replication",
  "replication");
 String replicationZnode = joinZNode(zkw.baseZNode, replicationZNodeName);
 if (ZKUtil.checkExists(zkw, replicationZnode) == -1)
  return;
 // do a ls -r on this znode
 List<String> stack = new LinkedList<String>();
 stack.add(replicationZnode);
 do {
  String znodeToProcess = stack.remove(stack.size() - 1);
  sb.append("\n").append(znodeToProcess).append(": ")
    .append(Bytes.toString(ZKUtil.getData(zkw, znodeToProcess)));
  for (String zNodeChild : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) {
   stack.add(ZKUtil.joinZNode(znodeToProcess, zNodeChild));
  }
 } while (stack.size() > 0);
}

Code example source: apache/hbase

public SplitOrMergeTracker(ZKWatcher watcher, Configuration conf,
              Abortable abortable) {
 try {
  if (ZKUtil.checkExists(watcher, watcher.getZNodePaths().switchZNode) < 0) {
   ZKUtil.createAndFailSilent(watcher, watcher.getZNodePaths().switchZNode);
  }
 } catch (KeeperException e) {
  throw new RuntimeException(e);
 }
 splitZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().switchZNode,
  conf.get("zookeeper.znode.switch.split", "split"));
 mergeZnode = ZNodePaths.joinZNode(watcher.getZNodePaths().switchZNode,
  conf.get("zookeeper.znode.switch.merge", "merge"));
 splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
 mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
}

Code example source: apache/hbase

@Test
public void testCleanZNode() throws Exception {
 ZKWatcher zkw = new ZKWatcher(TEST_UTIL.getConfiguration(),
   "testNodeTracker", new TestZKNodeTracker.StubAbortable());
 final ServerName sn = ServerName.valueOf("127.0.0.1:52", 45L);
 ZKUtil.createAndFailSilent(zkw,
   TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_ZNODE_PARENT,
     HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT));
 final String nodeName =  zkw.getZNodePaths().masterAddressZNode;
 // Check that we manage the case when there is no data
 ZKUtil.createAndFailSilent(zkw, nodeName);
 MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
 assertNotNull(ZKUtil.getData(zkw, nodeName));
 // Check that we don't delete if we're not supposed to
 ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0));
 MasterAddressTracker.deleteIfEquals(zkw, ServerName.valueOf("127.0.0.2:52", 45L).toString());
 assertNotNull(ZKUtil.getData(zkw, nodeName));
 // Check that we delete when we're supposed to
 ZKUtil.setData(zkw, nodeName, MasterAddressTracker.toByteArray(sn, 0));
 MasterAddressTracker.deleteIfEquals(zkw, sn.toString());
 assertNull(ZKUtil.getData(zkw, nodeName));
 // Check that we support the case when the znode does not exist
 MasterAddressTracker.deleteIfEquals(zkw, sn.toString()); // must not throw an exception
}

Code example source: apache/hbase

@Test
public void testZookeeperNodesForReplicas() throws Exception {
 // Checks all the znodes exist when meta's replicas are enabled
 ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
 Configuration conf = TEST_UTIL.getConfiguration();
 String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
   HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
 String primaryMetaZnode = ZNodePaths.joinZNode(baseZNode,
   conf.get("zookeeper.znode.metaserver", "meta-region-server"));
 // check that the data in the znode is parseable (this would also mean the znode exists)
 byte[] data = ZKUtil.getData(zkw, primaryMetaZnode);
 ProtobufUtil.toServerName(data);
 for (int i = 1; i < 3; i++) {
  String secZnode = ZNodePaths.joinZNode(baseZNode,
    conf.get("zookeeper.znode.metaserver", "meta-region-server") + "-" + i);
  String str = zkw.getZNodePaths().getZNodeForReplica(i);
  assertTrue(str.equals(secZnode));
  // check that the data in the znode is parseable (this would also mean the znode exists)
  data = ZKUtil.getData(zkw, secZnode);
  ProtobufUtil.toServerName(data);
 }
}

Code example source: harbby/presto-connectors

ZooKeeperWatcher zkw = null;
try {
 zkw = new ZooKeeperWatcher(getConf(), "Migrate ZK data to PB.",
  new ZKDataMigratorAbortable());
 if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
  LOG.info("No hbase related data available in zookeeper. returning..");
  return 0;
 }
 List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.baseZNode);
 if (children == null) {
  LOG.info("No child nodes to migrate. returning..");
  return 0;
 }
 for (String child : children) {
  String childPath = ZKUtil.joinZNode(zkw.baseZNode, child);
  if (child.equals(conf.get("zookeeper.znode.rootserver", "root-region-server"))) {
   // Old-format znodes are removed wholesale
   ZKUtil.deleteNodeRecursively(zkw, childPath);
  } else if (child.equals(conf.get("zookeeper.znode.draining.rs", "draining"))) {
   ZKUtil.deleteNodeRecursively(zkw, childPath);
  }
  // The full source handles several more child znodes with the same
  // recursive delete; those branches are omitted from this excerpt.
 }

Code example source: harbby/presto-connectors

/**
 * Appends replication znodes to the passed StringBuilder.
 * @param zkw the ZooKeeper watcher to read the znodes through
 * @param sb the StringBuilder the dump is appended to
 * @throws KeeperException if a ZooKeeper operation fails
 */
private static void getReplicationZnodesDump(ZooKeeperWatcher zkw, StringBuilder sb)
  throws KeeperException {
 String replicationZNodeName = zkw.getConfiguration().get("zookeeper.znode.replication",
  "replication");
 String replicationZnode = joinZNode(zkw.baseZNode, replicationZNodeName);
 if (ZKUtil.checkExists(zkw, replicationZnode) == -1) return;
 // do a ls -r on this znode
 sb.append("\n").append(replicationZnode).append(": ");
 List<String> children = ZKUtil.listChildrenNoWatch(zkw, replicationZnode);
 for (String child : children) {
  String znode = joinZNode(replicationZnode, child);
  if (child.equals(zkw.getConfiguration().get("zookeeper.znode.replication.peers", "peers"))) {
   appendPeersZnodes(zkw, znode, sb);
  } else if (child.equals(zkw.getConfiguration().
    get("zookeeper.znode.replication.rs", "rs"))) {
   appendRSZnodes(zkw, znode, sb);
  }
 }
}

Code example source: harbby/presto-connectors

private static void appendPeerState(ZooKeeperWatcher zkw, String znodeToProcess,
  StringBuilder sb) throws KeeperException, InvalidProtocolBufferException {
 String peerState = zkw.getConfiguration().get("zookeeper.znode.replication.peers.state",
  "peer-state");
 int pblen = ProtobufUtil.lengthOfPBMagic();
 for (String child : ZKUtil.listChildrenNoWatch(zkw, znodeToProcess)) {
  if (!child.equals(peerState)) continue;
  String peerStateZnode = ZKUtil.joinZNode(znodeToProcess, child);
  sb.append("\n").append(peerStateZnode).append(": ");
  byte[] peerStateData;
  try {
   peerStateData = ZKUtil.getData(zkw, peerStateZnode);
   ZooKeeperProtos.ReplicationState.Builder builder =
     ZooKeeperProtos.ReplicationState.newBuilder();
   ProtobufUtil.mergeFrom(builder, peerStateData, pblen, peerStateData.length - pblen);
   sb.append(builder.getState().name());
  } catch (IOException ipbe) {
   LOG.warn("Got Exception while parsing peer: " + znodeToProcess, ipbe);
  } catch (InterruptedException e) {
   zkw.interruptedException(e);
   return;
  }
 }
}

Code example source: apache/hbase

@Test
public void testAcquireMultiTasks() throws Exception {
 LOG.info("testAcquireMultiTasks");
 SplitLogCounters.resetCounters();
 final String TATAS = "tatas";
 final ServerName RS = ServerName.valueOf("rs,1,1");
 final int maxTasks = 3;
 Configuration testConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
 testConf.setInt(HBASE_SPLIT_WAL_MAX_SPLITTER, maxTasks);
 RegionServerServices mockedRS = getRegionServer(RS);
 for (int i = 0; i < maxTasks; i++) {
  zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, TATAS + i),
   new SplitLogTask.Unassigned(ServerName.valueOf("mgr,1,1")).toByteArray(),
    Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
 }
 SplitLogWorker slw = new SplitLogWorker(ds, testConf, mockedRS, neverEndingTask);
 slw.start();
 try {
  waitForCounter(SplitLogCounters.tot_wkr_task_acquired, 0, maxTasks, WAIT_TIME);
  for (int i = 0; i < maxTasks; i++) {
   byte[] bytes = ZKUtil.getData(zkw, ZKSplitLog.getEncodedNodeName(zkw, TATAS + i));
   SplitLogTask slt = SplitLogTask.parseFrom(bytes);
   assertTrue(slt.isOwned(RS));
  }
 } finally {
  stopSplitLogWorker(slw);
 }
}

Code example source: apache/hbase

@Test
public void testTaskErr() throws Exception {
 LOG.info("TestTaskErr - cleanup task node once in ERR state");
 conf.setInt("hbase.splitlog.max.resubmit", 0);
 slm = new SplitLogManager(master, conf);
 TaskBatch batch = new TaskBatch();
 String tasknode = submitTaskAndWait(batch, "foo/1");
 final ServerName worker1 = ServerName.valueOf("worker1,1,1");
 SplitLogTask slt = new SplitLogTask.Err(worker1);
 ZKUtil.setData(zkw, tasknode, slt.toByteArray());
 synchronized (batch) {
  while (batch.installed != batch.error) {
   batch.wait();
  }
 }
 waitForCounter(tot_mgr_task_deleted, 0, 1, to/2);
 assertTrue(ZKUtil.checkExists(zkw, tasknode) == -1);
 conf.setInt("hbase.splitlog.max.resubmit", ZKSplitLogManagerCoordination.DEFAULT_MAX_RESUBMIT);
}
