本文整理了Java中org.apache.pig.ResourceSchema.setFields
方法的一些代码示例,展示了ResourceSchema.setFields
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ResourceSchema.setFields
方法的具体详情如下:
包路径:org.apache.pig.ResourceSchema
类名称:ResourceSchema
方法名:setFields
[英]Set all the fields. If fields are not currently null the new fields will be silently ignored.
[中]设置所有字段。如果字段当前不为空,则新字段将被静默忽略。
代码示例来源:origin: apache/hive
/**
 * Converts an HCatalog {@code HCatSchema} into an equivalent Pig {@code ResourceSchema}.
 * Each HCatalog field is translated via {@code getResourceSchemaFromFieldSchema}, preserving order.
 *
 * @param hcatSchema the HCatalog schema to convert
 * @return a new ResourceSchema whose fields mirror {@code hcatSchema}'s fields
 * @throws IOException if a field cannot be translated
 */
public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {
List<ResourceFieldSchema> converted = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema fieldSchema : hcatSchema.getFields()) {
converted.add(getResourceSchemaFromFieldSchema(fieldSchema));
}
ResourceSchema result = new ResourceSchema();
result.setFields(converted.toArray(new ResourceFieldSchema[converted.size()]));
return result;
}
代码示例来源:origin: apache/hive
/**
 * Builds the tuple sub-schema for a struct-typed HCatalog field: every struct
 * member becomes one ResourceFieldSchema in the returned schema.
 *
 * @param hfs a struct-typed HCatalog field
 * @return a ResourceSchema describing the struct's members
 * @throws IOException if a member cannot be translated
 */
private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
List<ResourceFieldSchema> members = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema member : hfs.getStructSubSchema().getFields()) {
members.add(getResourceSchemaFromFieldSchema(member));
}
ResourceSchema tupleSchema = new ResourceSchema();
tupleSchema.setFields(members.toArray(new ResourceFieldSchema[members.size()]));
return tupleSchema;
}
代码示例来源:origin: apache/hive
ResourceSchema s = new ResourceSchema();
List<ResourceFieldSchema> lrfs = Arrays.asList(getResourceSchemaFromFieldSchema(arrayElementFieldSchema));
s.setFields(lrfs.toArray(new ResourceFieldSchema[lrfs.size()]));
bagSubFieldSchemas[0].setSchema(s);
} else {
.setType(getPigType(arrayElementFieldSchema))
bagSubFieldSchemas[0].setSchema(new ResourceSchema().setFields(innerTupleFieldSchemas));
return new ResourceSchema().setFields(bagSubFieldSchemas);
代码示例来源:origin: apache/phoenix
fields[i++] = field;
schema.setFields(fields);
} catch(SQLException sqle) {
LOG.error(String.format("Error: SQLException [%s] ",sqle.getMessage()));
代码示例来源:origin: apache/phoenix
@Test
public void testSchema() throws SQLException, IOException {
// Mock a TABLE-type configuration and resolve the schema through a stubbed
// dependency that reports two columns: ID and NAME.
final Configuration configuration = mock(Configuration.class);
when(configuration.get(PhoenixConfigurationUtil.SCHEMA_TYPE)).thenReturn(SchemaType.TABLE.name());
final ResourceSchema actual = PhoenixPigSchemaUtil.getResourceSchema(
configuration, new Dependencies() {
List<ColumnInfo> getSelectColumnMetadataList(
Configuration configuration) throws SQLException {
return Lists.newArrayList(ID_COLUMN, NAME_COLUMN);
}
});
// Expected schema: ID as long, NAME as chararray.
final ResourceFieldSchema[] fields = new ResourceFieldSchema[] {
new ResourceFieldSchema().setName("ID").setType(DataType.LONG),
new ResourceFieldSchema().setName("NAME").setType(DataType.CHARARRAY)
};
final ResourceSchema expected = new ResourceSchema().setFields(fields);
assertEquals(expected.toString(), actual.toString());
}
代码示例来源:origin: com.cloudera.recordservice/recordservice-hcatalog-pig-adapter
/**
 * Maps every field of the given HCatalog schema to its Pig equivalent and
 * collects them into a new {@code ResourceSchema}.
 *
 * @param hcatSchema the HCatalog schema to convert
 * @return the equivalent Pig resource schema
 * @throws IOException if any field fails to convert
 */
public static ResourceSchema getResourceSchema(HCatSchema hcatSchema)
throws IOException {
List<ResourceFieldSchema> pigFields = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema hcatField : hcatSchema.getFields()) {
pigFields.add(getResourceSchemaFromFieldSchema(hcatField));
}
ResourceSchema schema = new ResourceSchema();
schema.setFields(pigFields.toArray(new ResourceFieldSchema[pigFields.size()]));
return schema;
}
代码示例来源:origin: com.github.hyukjinkwon.hcatalog/hive-hcatalog-pig-adapter
/**
 * Translates an HCatalog schema into the corresponding Pig {@code ResourceSchema},
 * converting each field in declaration order.
 *
 * @param hcatSchema the source HCatalog schema
 * @return a ResourceSchema with one field per HCatalog field
 * @throws IOException if field conversion fails
 */
public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {
List<ResourceFieldSchema> fieldList = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema sourceField : hcatSchema.getFields()) {
ResourceFieldSchema translated = getResourceSchemaFromFieldSchema(sourceField);
fieldList.add(translated);
}
ResourceSchema target = new ResourceSchema();
target.setFields(fieldList.toArray(new ResourceFieldSchema[fieldList.size()]));
return target;
}
代码示例来源:origin: org.apache.hive.hcatalog/hive-hcatalog-pig-adapter
/**
 * Produces a Pig {@code ResourceSchema} equivalent to the supplied HCatalog schema.
 * Delegates per-field translation to {@code getResourceSchemaFromFieldSchema}.
 *
 * @param hcatSchema the HCatalog schema to mirror
 * @return a freshly built ResourceSchema with the translated fields
 * @throws IOException if a field cannot be translated
 */
public static ResourceSchema getResourceSchema(HCatSchema hcatSchema) throws IOException {
List<ResourceFieldSchema> mapped = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema field : hcatSchema.getFields()) {
mapped.add(getResourceSchemaFromFieldSchema(field));
}
ResourceSchema out = new ResourceSchema();
out.setFields(mapped.toArray(new ResourceFieldSchema[mapped.size()]));
return out;
}
代码示例来源:origin: com.github.hyukjinkwon.hcatalog/hive-hcatalog-pig-adapter
/**
 * Returns a ResourceSchema describing the struct members of {@code hfs}:
 * one ResourceFieldSchema per subfield of the struct.
 *
 * @param hfs a struct-typed HCatalog field
 * @throws IOException if a subfield cannot be translated
 */
private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
List<ResourceFieldSchema> subFieldSchemas = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema structMember : hfs.getStructSubSchema().getFields()) {
subFieldSchemas.add(getResourceSchemaFromFieldSchema(structMember));
}
ResourceSchema result = new ResourceSchema();
result.setFields(subFieldSchemas.toArray(new ResourceFieldSchema[subFieldSchemas.size()]));
return result;
}
代码示例来源:origin: com.cloudera.recordservice/recordservice-hcatalog-pig-adapter
/**
 * Converts the struct sub-schema of the given HCatalog field into a tuple
 * ResourceSchema, translating every struct member in order.
 *
 * @param hfs a struct-typed HCatalog field
 * @throws IOException if member translation fails
 */
private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs)
throws IOException {
List<ResourceFieldSchema> parts = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema part : hfs.getStructSubSchema().getFields()) {
parts.add(getResourceSchemaFromFieldSchema(part));
}
ResourceSchema tuple = new ResourceSchema();
tuple.setFields(parts.toArray(new ResourceFieldSchema[parts.size()]));
return tuple;
}
代码示例来源:origin: org.apache.hive.hcatalog/hive-hcatalog-pig-adapter
/**
 * Builds the equivalent tuple schema for a struct field: each subfield of the
 * struct is translated into a ResourceFieldSchema of the returned schema.
 *
 * @param hfs a struct-typed HCatalog field
 * @throws IOException if a subfield cannot be converted
 */
private static ResourceSchema getTupleSubSchema(HCatFieldSchema hfs) throws IOException {
List<ResourceFieldSchema> converted = new ArrayList<ResourceFieldSchema>();
for (HCatFieldSchema child : hfs.getStructSubSchema().getFields()) {
ResourceFieldSchema childSchema = getResourceSchemaFromFieldSchema(child);
converted.add(childSchema);
}
ResourceSchema schema = new ResourceSchema();
schema.setFields(converted.toArray(new ResourceFieldSchema[converted.size()]));
return schema;
}
代码示例来源:origin: Netflix/iceberg
/**
 * Translates an Iceberg {@code Schema} into a Pig {@code ResourceSchema} by
 * converting its columns via {@code convertFields}.
 *
 * @param icebergSchema the Iceberg schema to convert
 * @throws IOException if column conversion fails
 */
public static ResourceSchema convert(Schema icebergSchema) throws IOException {
ResourceSchema pigSchema = new ResourceSchema();
pigSchema.setFields(convertFields(icebergSchema.columns()));
return pigSchema;
}
代码示例来源:origin: org.apache.pig/pig
@Override
public Schema outputSchema(Schema input) {
try {
// Lazily initialize the UDAF evaluator metadata on first call.
if (!inited) {
schemaAndEvaluatorInfo.init(getInputSchema(), instantiateUDAF(funcName), Mode.COMPLETE, constantsInfo);
inited = true;
}
// The UDAF yields a single output field; wrap it as a one-field schema.
ResourceFieldSchema outputField = HiveUtils.getResourceFieldSchema(schemaAndEvaluatorInfo.outputTypeInfo);
ResourceSchema wrapper = new ResourceSchema();
wrapper.setFields(new ResourceFieldSchema[] { outputField });
return Schema.getPigSchema(wrapper);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
代码示例来源:origin: org.apache.pig/pig
@Override
public Schema outputSchema(Schema input) {
try {
// First call: bind the UDF's schema info to the current input schema.
if (!inited) {
schemaInfo.init(getInputSchema(), evalUDF, constantsInfo);
inited = true;
}
// Derive the single output field from the UDF's object inspector.
ResourceFieldSchema resultField = HiveUtils.getResourceFieldSchema(
TypeInfoUtils.getTypeInfoFromObjectInspector(schemaInfo.outputObjectInspector));
ResourceSchema resultSchema = new ResourceSchema();
resultSchema.setFields(new ResourceFieldSchema[] { resultField });
return Schema.getPigSchema(resultSchema);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
代码示例来源:origin: com.facebook.presto.cassandra/cassandra-server
/** schema: (value, value, value) where keys are in the front. */
public ResourceSchema getSchema(String location, Job job) throws IOException
{
setLocation(location, job);
CfInfo cfInfo = getCfInfo(loadSignature);
CfDef cfDef = cfInfo.cfDef;
// Default marshallers and per-column validators determine each field's Pig type.
Map<MarshallerType, AbstractType> marshallers = getDefaultMarshallers(cfDef);
Map<ByteBuffer, AbstractType> validators = getValidatorMap(cfDef);
// Accumulate one field per column in the column family's metadata.
List<ResourceFieldSchema> columnFields = new ArrayList<ResourceFieldSchema>();
for (ColumnDef columnDef : cfDef.column_metadata)
{
ResourceFieldSchema columnField = new ResourceFieldSchema();
// Fall back to the default validator when no per-column validator exists.
AbstractType valueType = validators.get(columnDef.name);
if (valueType == null)
valueType = marshallers.get(MarshallerType.DEFAULT_VALIDATOR);
columnField.setName(new String(columnDef.getName()));
columnField.setType(getPigType(valueType));
columnFields.add(columnField);
}
// Top-level schema (untyped) containing every column field.
ResourceSchema schema = new ResourceSchema();
schema.setFields(columnFields.toArray(new ResourceFieldSchema[columnFields.size()]));
return schema;
}
代码示例来源:origin: nl.basjes.parse.httpdlog/httpdlog-pigloader
rs.setFields(fieldSchemaList.toArray(new ResourceFieldSchema[fieldSchemaList.size()]));
return rs;
代码示例来源:origin: org.apache.pig/pig
/**
 * This method adds FieldSchema of 'input source tag/path' as the first
 * field. This will be called only when PigStorage is invoked with
 * '-tagFile' or '-tagPath' option and the schema file is present to be
 * loaded.
 *
 * @param schema schema whose fields are prepended with the source-tag field
 * @param fieldName name of the new first (chararray) field
 * @return ResourceSchema the same schema instance with the widened field array set
 */
public static ResourceSchema getSchemaWithInputSourceTag(ResourceSchema schema, String fieldName) {
ResourceFieldSchema[] fieldSchemas = schema.getFields();
ResourceFieldSchema sourceTagSchema = new ResourceFieldSchema(new FieldSchema(fieldName, DataType.CHARARRAY));
ResourceFieldSchema[] fieldSchemasWithSourceTag = new ResourceFieldSchema[fieldSchemas.length + 1];
fieldSchemasWithSourceTag[0] = sourceTagSchema;
// Bulk-copy the original fields after the tag field (replaces the manual index loop).
System.arraycopy(fieldSchemas, 0, fieldSchemasWithSourceTag, 1, fieldSchemas.length);
return schema.setFields(fieldSchemasWithSourceTag);
}
代码示例来源:origin: org.apache.pig/pig
@Override
public Schema outputSchema(Schema input) {
try {
// Initialize the UDTF schema metadata once, on first use.
if (!inited) {
schemaInfo.init(getInputSchema(), udtf, constantsInfo);
inited = true;
}
// Single element field derived from the UDTF's output object inspector.
ResourceFieldSchema elementField = HiveUtils.getResourceFieldSchema(
TypeInfoUtils.getTypeInfoFromObjectInspector(schemaInfo.outputObjectInspector));
ResourceSchema tupleSchema = new ResourceSchema();
tupleSchema.setFields(new ResourceFieldSchema[] { elementField });
// A UDTF emits multiple rows, so the tuple schema is exposed inside a bag.
ResourceFieldSchema bagField = new ResourceFieldSchema();
bagField.setType(DataType.BAG);
bagField.setSchema(tupleSchema);
ResourceSchema bagSchema = new ResourceSchema();
bagSchema.setFields(new ResourceFieldSchema[] { bagField });
return Schema.getPigSchema(bagSchema);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
代码示例来源:origin: com.twitter.elephantbird/elephant-bird-pig
/**
 * Returns a two-field ("key", "value") tuple schema assembled from the
 * configured converters, or {@code null} when either converter cannot
 * supply a load schema.
 */
@Override
public ResourceSchema getSchema(String location, Job job) throws IOException {
// Key field — bail out when the key converter has no schema to report.
ResourceFieldSchema keyField = config.keyConverter.getLoadSchema();
if (keyField == null) {
return null;
}
keyField.setName("key");
// Value field — same fallback behavior as the key.
ResourceFieldSchema valueField = config.valueConverter.getLoadSchema();
if (valueField == null) {
return null;
}
valueField.setName("value");
ResourceSchema tupleSchema = new ResourceSchema();
tupleSchema.setFields(new ResourceFieldSchema[] { keyField, valueField });
return tupleSchema;
}
代码示例来源:origin: com.twitter.elephantbird/elephant-bird-pig
/**
 * Creates a new ResourceFieldSchema which reflects data from an input RequiredField.
 *
 * @param field the required field to convert
 * @return new ResourceFieldSchema which reflects {@code field}.
 * @throws IOException if a sub-field cannot be converted
 */
public static ResourceFieldSchema createResourceFieldSchema(RequiredField field)
throws IOException {
ResourceFieldSchema fieldSchema =
new ResourceFieldSchema().setName(field.getAlias()).setType(field.getType());
List<RequiredField> subFields = field.getSubFields();
if (subFields == null || subFields.isEmpty()) {
return fieldSchema;
}
// Recursively convert each sub-field and attach them as a nested schema.
ResourceFieldSchema[] nestedFields = new ResourceFieldSchema[subFields.size()];
for (int idx = 0; idx < nestedFields.length; idx++) {
nestedFields[idx] = createResourceFieldSchema(subFields.get(idx));
}
ResourceSchema nestedSchema = new ResourceSchema();
nestedSchema.setFields(nestedFields);
fieldSchema.setSchema(nestedSchema);
return fieldSchema;
}
内容来源于网络,如有侵权,请联系作者删除!