Usage and code examples of org.apache.hadoop.hive.ql.metadata.Hive.closeCurrent()

x33g5p2x · Reposted 2022-01-20

This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Hive.closeCurrent() method and shows how it is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and are intended as a practical reference. Details of Hive.closeCurrent() are as follows:
Package path: org.apache.hadoop.hive.ql.metadata.Hive
Class name: Hive
Method name: closeCurrent

Hive.closeCurrent overview

No official description is available. Judging from the examples below, Hive.closeCurrent() closes the Hive object held in the current thread's thread-local and releases its Hive Metastore (HMS) connection. A thread that obtains a Hive instance via Hive.get() should call it when finished, otherwise the HMS connection can leak; after the call, a subsequent Hive.get() returns a new instance.
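
A minimal usage sketch of this pattern is shown below. It is not taken from any of the projects quoted in this article; the class name CloseCurrentSketch, the HiveConf setup, and the getAllDatabases() call are only illustrative. The essential part is pairing Hive.get() with Hive.closeCurrent() in a finally block, as the collected examples do.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

public class CloseCurrentSketch {
 public static void main(String[] args) throws HiveException {
  HiveConf conf = new HiveConf();
  try {
   // Creates (or reuses) the thread-local Hive object and its HMS connection.
   Hive db = Hive.get(conf);
   System.out.println("Databases: " + db.getAllDatabases());
  } finally {
   // Closes the thread-local Hive object and releases its HMS connection.
   Hive.closeCurrent();
  }
 }
}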

Code examples

Code example source: apache/hive

void close() {
 if (hive != null) {
  runAndLogErrors(() -> Hive.closeCurrent());
  hive = null;
 }
}

Code example source: apache/hive (the same method body also appears in org.apache.hive/hive-service)

@Override
public synchronized void stop() {
 super.stop();
 // Release the HMS connection for this service thread
 Hive.closeCurrent();
}

Code example source: apache/hive

@Override
public void run() {
 runner = Thread.currentThread();
 try {
  SessionState.start(ss);
  runSequential();
 } finally {
  try {
   // Call Hive.closeCurrent() that closes the HMS connection, causes
   // HMS connection leaks otherwise.
   Hive.closeCurrent();
  } catch (Exception e) {
   LOG.warn("Exception closing Metastore connection:" + e.getMessage());
  }
  runner = null;
  result.setRunning(false);
 }
}

Code example source: apache/drill

@Override
public void run() {
 runner = Thread.currentThread();
 try {
  OperationLog.setCurrentOperationLog(operationLog);
  SessionState.start(ss);
  runSequential();
 } finally {
  try {
   // Call Hive.closeCurrent() that closes the HMS connection, causes
   // HMS connection leaks otherwise.
   Hive.closeCurrent();
  } catch (Exception e) {
   LOG.warn("Exception closing Metastore connection:" + e.getMessage());
  }
  runner = null;
  result.setRunning(false);
 }
}

Code example source: apache/hive

/**
 * Test basic Hive class interaction, that:
 * - We can have different Hive objects throughout the lifetime of this thread.
 */
public void testHiveCloseCurrent() throws Throwable {
 Hive hive1 = Hive.get();
 Hive.closeCurrent();
 Hive hive2 = Hive.get();
 Hive.closeCurrent();
 assertTrue(hive1 != hive2);
}

Code example source: apache/drill

private static Hive create(HiveConf c, boolean needsRefresh, Hive db, boolean doRegisterAllFns)
  throws HiveException {
 if (db != null) {
  LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh +
   ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
  db.close();
 }
 closeCurrent();
 if (c == null) {
  c = createHiveConf();
 }
 c.set("fs.scheme.class", "dfs");
 Hive newdb = new Hive(c, doRegisterAllFns);
 hiveDB.set(newdb);
 return newdb;
}

Code example source: apache/hive

private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFastCheck,
  boolean doRegisterAllFns) throws HiveException {
 Hive db = hiveDB.get();
 if (db == null || !db.isCurrentUserOwner() || needsRefresh
   || (c != null && !isCompatible(db, c, isFastCheck))) {
  if (db != null) {
   LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh +
       ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
   closeCurrent();
  }
  db = create(c, doRegisterAllFns);
 }
 if (c != null) {
  db.conf = c;
 }
 return db;
}

Code example source: apache/hive

@After
public void tearDown() throws Exception {
 dropDbTable();
 Hive.closeCurrent();
}

Code example source: apache/hive

@Override
protected void tearDown() throws Exception {
 try {
  super.tearDown();
  // disable trash
  hiveConf.setFloat("fs.trash.checkpoint.interval", 30);  // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
  hiveConf.setFloat("fs.trash.interval", 30);             // FS_TRASH_INTERVAL_KEY (hadoop-2)
  Hive.closeCurrent();
 } catch (Exception e) {
  System.err.println(StringUtils.stringifyException(e));
  System.err
    .println("Unable to close Hive Metastore using configruation: \n "
    + hiveConf);
  throw e;
 }
}

Code example source: apache/hive

 // ... (fragment; surrounding code truncated at the source)
} finally {
 Hive.closeCurrent();
}

Code example source: apache/hive

public String getDelegationTokenFromMetaStore(String owner)
  throws HiveSQLException, UnsupportedOperationException, LoginException, IOException {
 HiveConf hiveConf = getHiveConf();
 if (!hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL) ||
   !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
  throw new UnsupportedOperationException(
    "delegation token is can only be obtained for a secure remote metastore");
 }
 try {
  Hive.closeCurrent();
  return Hive.get(hiveConf).getDelegationToken(owner, owner);
 } catch (HiveException e) {
  if (e.getCause() instanceof UnsupportedOperationException) {
   throw (UnsupportedOperationException)e.getCause();
  } else {
   throw new HiveSQLException("Error connect metastore to setup impersonation", e);
  }
 }
}

Code example source: apache/hive

 // ... (fragment; surrounding code truncated at the source)
 Hive.closeCurrent();
} catch (Throwable t) {
 LOG.warn("Error closing thread local Hive", t);
}

Code example source: apache/hive

private IMetaStoreClient getMetaStoreClient(boolean retryInCaseOfTokenExpiration) throws HiveSQLException {
 try {
  return Hive.get(getHiveConf()).getMSC();
 } catch (HiveException e) {
  throw new HiveSQLException("Failed to get metastore connection: " + e, e);
 } catch(MetaException e1) {
  if (hmsDelegationTokenStr != null && retryInCaseOfTokenExpiration) {
   LOG.info("Retrying failed metastore connection: " + e1, e1);
   Hive.closeCurrent();
   try {
    setDelegationToken(Hive.get(getHiveConf()).getDelegationToken(sessionUgi.getUserName(), getUserName()));
   } catch (HiveException e2) {
    throw new HiveSQLException("Error connect metastore to setup impersonation: " + e2, e2);
   }
   return getMetaStoreClient(false);
  } else {
   throw new HiveSQLException("Failed to get metastore connection: " + e1, e1);
  }
 }
}

Code example source: apache/drill

 // ... (fragment; surrounding code truncated at the source)
} finally {
 Hive.closeCurrent();
}

Code example source: apache/hive

@Override
 public Object run() throws HiveSQLException {
  assert (!parentHive.allowClose());
  Hive.set(parentHive);
  // TODO: can this result in cross-thread reuse of session state?
  SessionState.setCurrentSessionState(parentSessionState);
  PerfLogger.setPerfLogger(SessionState.getPerfLogger());
  LogUtils.registerLoggingContext(queryState.getConf());
  ShimLoader.getHadoopShims().setHadoopQueryContext(queryState.getQueryId());
  try {
   if (asyncPrepare) {
    prepare(queryState);
   }
   runQuery();
  } catch (HiveSQLException e) {
   // TODO: why do we invent our own error path op top of the one from Future.get?
   setOperationException(e);
   LOG.error("Error running hive query: ", e);
  } finally {
   LogUtils.unregisterLoggingContext();
   // If new hive object is created  by the child thread, then we need to close it as it might
   // have created a hms connection. Call Hive.closeCurrent() that closes the HMS connection, causes
   // HMS connection leaks otherwise.
   Hive.closeCurrent();
  }
  return null;
 }
};

Code example source: apache/phoenix

public void shutdown() throws Exception {
 if (System.getenv(QTEST_LEAVE_FILES) == null) {
  cleanUp();
 }
 if (clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
  SessionState.get().getTezSession().destroy();
 }
 
 setup.tearDown();
 if (sparkSession != null) {
  try {
   SparkSessionManagerImpl.getInstance().closeSession(sparkSession);
  } catch (Exception ex) {
   LOG.error("Error closing spark session.", ex);
  } finally {
   sparkSession = null;
  }
 }
 if (mr != null) {
  mr.shutdown();
  mr = null;
 }
 FileSystem.closeAll();
 if (dfs != null) {
  dfs.shutdown();
  dfs = null;
 }
 Hive.closeCurrent();
}

Code example source: org.apache.spark/spark-hive-thriftserver_2.11 (the same method also appears verbatim in com.github.hyukjinkwon/hive-service and org.spark-project.hive/hive-service)

private void cancelDelegationToken() throws HiveSQLException {
 if (delegationTokenStr != null) {
  try {
   Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr);
  } catch (HiveException e) {
   throw new HiveSQLException("Couldn't cancel delegation token", e);
  }
  // close the metastore connection created with this delegation token
  Hive.closeCurrent();
 }
}
