本文整理了Java中org.apache.hadoop.hive.ql.exec.Utilities.getColumnTypes()
方法的一些代码示例,展示了Utilities.getColumnTypes()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utilities.getColumnTypes()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.Utilities
类名称:Utilities
方法名:getColumnTypes
暂无
代码示例来源:origin: apache/hive
/**
 * Initializes this SerDe's column metadata (names, types, row ObjectInspector)
 * from the table's creation properties.
 *
 * @param properties table properties carrying the declared column names/types
 * @throws SerDeException if the mandatory Druid timestamp column is absent
 *         from the declared column list
 */
private void initFromProperties(final Properties properties) throws SerDeException {
  final List<String> columnNames = new ArrayList<>(Utilities.getColumnNames(properties));
  if (!columnNames.contains(DruidConstants.DEFAULT_TIMESTAMP_COLUMN)) {
    // Fixed quoting: was "(' " ... "')", which rendered as (' __time') with a stray space.
    throw new SerDeException("Timestamp column ('"
        + DruidConstants.DEFAULT_TIMESTAMP_COLUMN
        + "') not specified in create table; list of columns is : "
        + properties.getProperty(serdeConstants.LIST_COLUMNS));
  }
  // Single pipeline: resolve each declared type name, normalizing any
  // TimestampLocalTZ type to this SerDe's configured instance. (The original
  // collected to an intermediate list and immediately re-streamed it.)
  final List<PrimitiveTypeInfo> columnTypes = Utilities.getColumnTypes(properties)
      .stream()
      .map(TypeInfoFactory::getPrimitiveTypeInfo)
      .map(e -> e instanceof TimestampLocalTZTypeInfo ? tsTZTypeInfo : e)
      .collect(Collectors.toList());
  // One Java-object inspector per column type, in column order.
  final List<ObjectInspector> inspectors = columnTypes.stream()
      .map(PrimitiveObjectInspectorFactory::getPrimitiveJavaObjectInspector)
      .collect(Collectors.toList());
  columns = columnNames.toArray(new String[0]);
  types = columnTypes.toArray(new PrimitiveTypeInfo[0]);
  inspector = ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, inspectors);
}
代码示例来源:origin: apache/hive
/**
 * Serializes {@code rowObject} through the given {@link DruidSerDe} using an
 * ObjectInspector extended with the Druid timestamp-granularity column, then
 * asserts the produced writable matches {@code druidWritable} entry by entry.
 *
 * @param properties    table properties declaring the base column names/types
 * @param serDe         the SerDe under test
 * @param rowObject     the row to serialize
 * @param druidWritable the expected serialization result
 * @throws SerDeException if serialization fails
 */
private static void serializeObject(Properties properties,
    DruidSerDe serDe,
    Object[] rowObject,
    DruidWritable druidWritable) throws SerDeException {
  // Build OI with timestamp granularity column appended after the declared columns.
  final List<String> names = new ArrayList<>(Utilities.getColumnNames(properties));
  names.add(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME);
  // Resolve each declared type name, then add the timestamp type for the extra column.
  final List<PrimitiveTypeInfo> typeInfos = new ArrayList<>();
  for (String typeName : Utilities.getColumnTypes(properties)) {
    typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(typeName));
  }
  typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo("timestamp"));
  // One writable-object inspector per column type, in column order.
  final List<ObjectInspector> fieldInspectors = new ArrayList<>();
  for (PrimitiveTypeInfo typeInfo : typeInfos) {
    fieldInspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo));
  }
  final ObjectInspector rowInspector =
      ObjectInspectorFactory.getStandardStructObjectInspector(names, fieldInspectors);
  // Serialize
  final DruidWritable actual = (DruidWritable) serDe.serialize(rowObject, rowInspector);
  // Check result
  assertEquals(druidWritable.getValue().size(), actual.getValue().size());
  for (Entry<String, Object> expected : druidWritable.getValue().entrySet()) {
    assertEquals(expected.getValue(), actual.getValue().get(expected.getKey()));
  }
}
代码示例来源:origin: apache/hive
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: apache/hive
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: apache/drill
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: apache/drill
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: com.facebook.presto.hive/hive-apache
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
代码示例来源:origin: com.facebook.presto.hive/hive-apache
// Join-key column names and their type names, read from the key TableDesc's
// properties (presumably LIST_COLUMNS / LIST_COLUMN_TYPES — confirm in Utilities).
List<String> joinKeys = Utilities
.getColumnNames(keyTblDesc.getProperties());
List<String> joinKeyTypes = Utilities.getColumnTypes(keyTblDesc
.getProperties());
内容来源于网络,如有侵权,请联系作者删除!