This article collects Java code examples for org.apache.hadoop.hive.ql.metadata.Table.getSerializationLib()
and shows how Table.getSerializationLib() is used in practice. The examples are taken from
selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven,
so they should serve as a useful reference. Details of the Table.getSerializationLib() method:
Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: getSerializationLib
Method description: not available
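getSerializationLib() returns the SerDe class name recorded in the table's storage descriptor (its SerDeInfo). The minimal sketch below shows how the value is typically read from a Table obtained through the Hive metadata API; the database and table names ("default", "my_table") are hypothetical placeholders, and a reachable metastore is assumed.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class SerdeLibExample {
  public static void main(String[] args) throws Exception {
    // Obtain a Hive client from the current configuration (assumes a reachable metastore).
    Hive hive = Hive.get(new HiveConf());
    // "default" / "my_table" are placeholder names for illustration only.
    Table table = hive.getTable("default", "my_table");
    // The returned string is the fully qualified SerDe class name,
    // e.g. org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.
    System.out.println("SerDe library: " + table.getSerializationLib());
  }
}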
Code example source: apache/hive
/**
* @param table
* @return true if the table has the parquet serde defined
*/
public static boolean isParquetTable(Table table) {
return table == null ? false : ParquetHiveSerDe.class.getName().equals(table.getSerializationLib());
}
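The same null-safe pattern can be reused for other SerDes by comparing getSerializationLib() against the corresponding class name. The sketch below is an assumed variant for OpenCSVSerde; the helper name isCsvTable is made up for illustration.

import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.serde2.OpenCSVSerde;

public final class SerdeChecks {
  /**
   * @param table the Hive table to inspect (may be null)
   * @return true if the table is declared with the CSV serde
   */
  public static boolean isCsvTable(Table table) {
    // Mirror isParquetTable(): guard against null, then compare class names.
    return table != null && OpenCSVSerde.class.getName().equals(table.getSerializationLib());
  }
}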
Code example source: apache/hive
private List<FieldSchema> getColsInternal(boolean forMs) {
String serializationLib = getSerializationLib();
try {
// Do the lightweight check for general case.
if (hasMetastoreBasedSchema(SessionState.getSessionConf(), serializationLib)) {
return tTable.getSd().getCols();
} else if (forMs && !shouldStoreFieldsInMetastore(
SessionState.getSessionConf(), serializationLib, tTable.getParameters())) {
return Hive.getFieldsFromDeserializerForMsStorage(this, getDeserializer());
} else {
return HiveMetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer());
}
} catch (Exception e) {
LOG.error("Unable to get field from serde: " + serializationLib, e);
}
return new ArrayList<FieldSchema>();
}
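The serde name returned by getSerializationLib() is what decides whether the column list comes straight from the metastore storage descriptor or has to be derived from the deserializer. Callers normally go through the public getCols() accessor rather than calling getColsInternal() directly; a minimal sketch follows, where the table argument is assumed to be an already-loaded org.apache.hadoop.hive.ql.metadata.Table.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Table;

public final class ColumnLister {
  // 'table' is assumed to be a Table already fetched from the metastore.
  public static void printColumns(Table table) {
    // getCols() inspects getSerializationLib() internally to decide whether the
    // schema comes from the metastore or from the table's deserializer.
    List<FieldSchema> cols = table.getCols();
    for (FieldSchema col : cols) {
      System.out.println(col.getName() + " : " + col.getType());
    }
  }
}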
Code example source: apache/drill
private List<FieldSchema> getColsInternal(boolean forMs) {
String serializationLib = getSerializationLib();
try {
// Do the lightweight check for general case.
if (hasMetastoreBasedSchema(SessionState.getSessionConf(), serializationLib)) {
return tTable.getSd().getCols();
} else if (forMs && !shouldStoreFieldsInMetastore(
SessionState.getSessionConf(), serializationLib, tTable.getParameters())) {
return Hive.getFieldsFromDeserializerForMsStorage(this, getDeserializer());
} else {
return MetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer());
}
} catch (Exception e) {
LOG.error("Unable to get field from serde: " + serializationLib, e);
}
return new ArrayList<FieldSchema>();
}
Code example source: apache/hive
private void alterPartitionSpecInMemory(Table tbl,
Map<String, String> partSpec,
org.apache.hadoop.hive.metastore.api.Partition tpart,
boolean inheritTableSpecs,
String partPath) throws HiveException, InvalidOperationException {
LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
+ partSpec);
if (inheritTableSpecs) {
tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
tpart.getSd().getSerdeInfo().setParameters(
tbl.getTTable().getSd().getSerdeInfo().getParameters());
tpart.getSd().setBucketCols(tbl.getBucketCols());
tpart.getSd().setNumBuckets(tbl.getNumBuckets());
tpart.getSd().setSortCols(tbl.getSortCols());
}
if (partPath == null || partPath.trim().equals("")) {
throw new HiveException("new partition path should not be null or empty.");
}
tpart.getSd().setLocation(partPath);
}
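In the snippet above, when inheritTableSpecs is true the partition's SerDeInfo is overwritten with the table-level value of getSerializationLib(). The following is a minimal sketch, under the assumption that a Hive client, a loaded Table, and a partition spec are already available, of reading that value back from a partition and comparing it with its table; the class and method names are made up for illustration.

import java.util.Map;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public final class PartitionSerdeCheck {
  /** Returns true if the partition carries the same SerDe as its table. */
  public static boolean partitionMatchesTableSerde(Hive hive, Table table,
      Map<String, String> partSpec) throws Exception {
    // Partition objects keep their own SerDeInfo inside the storage descriptor.
    Partition partition = hive.getPartition(table, partSpec, false);
    String partitionSerde =
        partition.getTPartition().getSd().getSerdeInfo().getSerializationLib();
    return table.getSerializationLib().equals(partitionSerde);
  }
}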
Code example source: apache/drill
private void alterPartitionSpecInMemory(Table tbl,
Map<String, String> partSpec,
org.apache.hadoop.hive.metastore.api.Partition tpart,
boolean inheritTableSpecs,
String partPath) throws HiveException, InvalidOperationException {
LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
+ partSpec);
if (inheritTableSpecs) {
tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
tpart.getSd().getSerdeInfo().setParameters(
tbl.getTTable().getSd().getSerdeInfo().getParameters());
tpart.getSd().setBucketCols(tbl.getBucketCols());
tpart.getSd().setNumBuckets(tbl.getNumBuckets());
tpart.getSd().setSortCols(tbl.getSortCols());
}
if (partPath == null || partPath.trim().equals("")) {
throw new HiveException("new partition path should not be null or empty.");
}
tpart.getSd().setLocation(partPath);
}
Code example source: apache/hive
.getMsg(" Table inputformat/outputformats do not match"));
String existingSerde = table.getSerializationLib();
String importedSerde = tableDesc.getSerName();
if (!existingSerde.equals(importedSerde)) {
Code example source: apache/drill
.getMsg(" Table inputformat/outputformats do not match"));
String existingSerde = table.getSerializationLib();
String importedSerde = tableDesc.getSerName();
if (!existingSerde.equals(importedSerde)) {
Code example source: apache/hive
+ "; " + tbl.getTTable() + ")", ft.getTTable().equals(tbl.getTTable()));
assertEquals("SerializationLib is not set correctly", tbl
.getSerializationLib(), ft.getSerializationLib());
assertEquals("Serde is not set correctly", tbl.getDeserializer()
.getClass().getName(), ft.getDeserializer().getClass().getName());
Code example source: org.apache.hadoop.hive/hive-exec
public List<FieldSchema> getCols() {
boolean getColsFromSerDe = SerDeUtils.shouldGetColsFromSerDe(
getSerializationLib());
if (!getColsFromSerDe) {
return tTable.getSd().getCols();
} else {
try {
return Hive.getFieldsFromDeserializer(getTableName(), getDeserializer());
} catch (HiveException e) {
LOG.error("Unable to get field from serde: " + getSerializationLib(), e);
}
return new ArrayList<FieldSchema>();
}
}
Code example source: com.facebook.presto.hive/hive-apache
public List<FieldSchema> getCols() {
String serializationLib = getSerializationLib();
try {
if (hasMetastoreBasedSchema(SessionState.getSessionConf(), serializationLib)) {
return tTable.getSd().getCols();
} else {
return MetaStoreUtils.getFieldsFromDeserializer(getTableName(), getDeserializer());
}
} catch (Exception e) {
LOG.error("Unable to get field from serde: " + serializationLib, e);
}
return new ArrayList<FieldSchema>();
}
Code example source: org.apache.lens/lens-cube
tblDesc.setMapKeyDelimiter(tbl.getSerdeParam(serdeConstants.MAPKEY_DELIM));
tblDesc.setEscapeChar(tbl.getSerdeParam(serdeConstants.ESCAPE_CHAR));
tblDesc.setSerdeClassName(tbl.getSerializationLib());
tblDesc.setStorageHandlerName(tbl.getStorageHandler() != null
? tbl.getStorageHandler().getClass().getCanonicalName() : "");
Code example source: apache/lens
tblDesc.setMapKeyDelimiter(tbl.getSerdeParam(serdeConstants.MAPKEY_DELIM));
tblDesc.setEscapeChar(tbl.getSerdeParam(serdeConstants.ESCAPE_CHAR));
tblDesc.setSerdeClassName(tbl.getSerializationLib());
tblDesc.setStorageHandlerName(tbl.getStorageHandler() != null
? tbl.getStorageHandler().getClass().getCanonicalName() : "");
Code example source: org.apache.hadoop.hive/hive-exec
tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
if (partPath == null || partPath.trim().equals("")) {
throw new HiveException("new partition path should not be null or empty.");
Code example source: org.apache.hadoop.hive/hive-exec
List<FieldSchema> newCols = alterTbl.getNewCols();
List<FieldSchema> oldCols = tbl.getCols();
if (tbl.getSerializationLib().equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
if (tbl.getSerializationLib().equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
tbl.setSerializationLib(LazySimpleSerDe.class.getName());
} else if (!tbl.getSerializationLib().equals(
MetadataTypedColumnsetSerDe.class.getName())
&& !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
&& !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
&& !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
console.printError("Replace columns is not supported for this table. "
+ "SerDe may be incompatible.");
Code example source: com.facebook.presto.hive/hive-apache
.getMsg(" Table inputformat/outputformats do not match"));
String existingSerde = table.getSerializationLib();
String importedSerde = tableDesc.getSerName();
if (!existingSerde.equals(importedSerde)) {
Code example source: com.facebook.presto.hive/hive-apache
private void alterPartitionSpec(Table tbl,
Map<String, String> partSpec,
org.apache.hadoop.hive.metastore.api.Partition tpart,
boolean inheritTableSpecs,
String partPath) throws HiveException, InvalidOperationException {
LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
+ partSpec);
if (inheritTableSpecs) {
tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
tpart.getSd().getSerdeInfo().setParameters(
tbl.getTTable().getSd().getSerdeInfo().getParameters());
tpart.getSd().setBucketCols(tbl.getBucketCols());
tpart.getSd().setNumBuckets(tbl.getNumBuckets());
tpart.getSd().setSortCols(tbl.getSortCols());
}
if (partPath == null || partPath.trim().equals("")) {
throw new HiveException("new partition path should not be null or empty.");
}
tpart.getSd().setLocation(partPath);
tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,"true");
String fullName = tbl.getTableName();
if (!com.facebook.presto.hive.$internal.org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
fullName = tbl.getDbName() + "." + tbl.getTableName();
}
alterPartition(fullName, new Partition(tbl, tpart));
}