org.apache.hadoop.hive.metastore.api.Partition.putToParameters()方法的使用及代码示例

x33g5p2x  于2022-01-26 转载在 其他  
字(6.6k)|赞(0)|评价(0)|浏览(88)

本文整理了Java中org.apache.hadoop.hive.metastore.api.Partition.putToParameters()方法的一些代码示例,展示了Partition.putToParameters()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.putToParameters()方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Partition
类名称:Partition
方法名:putToParameters

Partition.putToParameters介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/** Forwards the key/value pair to the wrapped {@code partition}'s parameter map. */
@Override
public void putToParameters(String key, String value) {
  partition.putToParameters(key, value);
}
/** Returns the location recorded on the wrapped partition's storage descriptor. */
@Override
public String getLocation() {
  return partition.getSd().getLocation();
}

代码示例来源:origin: apache/hive

/**
 * Stores {@code key}/{@code value} in the parameters map of the partition
 * currently selected by {@code index} within {@code partitionList}.
 */
@Override
public void putToParameters(String key, String value) {
 Partition selected = partitionList.get(index);
 selected.putToParameters(key, value);
}

代码示例来源:origin: apache/hive

// Row-applier callback: copies one parameter row onto the partition.
// fields[1] holds the parameter key; fields[2] is a CLOB column decoded to a
// String by extractSqlClob (defined in the enclosing class, not shown here).
@Override
 public void apply(Partition t, Object[] fields) {
  t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
 }});
// Perform conversion of null map values

代码示例来源:origin: apache/hive

// Row-applier callback: writes one parameter entry (key at fields[1], CLOB
// value at fields[2] decoded via extractSqlClob) into the Partition's
// parameter map.
@Override
 public void apply(Partition t, Object[] fields) {
  t.putToParameters((String) fields[1], extractSqlClob(fields[2]));
 }
});

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the target-side copy of {@code originPartition}: a deep copy of its
 * Thrift partition attached to the target table, re-pointed at the target
 * database and {@code targetLocation}, and stamped with Gobblin distcp
 * registration metadata.
 *
 * @throws IOException wrapping any {@link HiveException} raised while building the copy
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
 try {
  String targetDb = this.hiveCopyEntityHelper.getTargetDatabase();
  Partition target =
    new Partition(this.hiveCopyEntityHelper.getTargetTable(), originPartition.getTPartition().deepCopy());
  target.getTable().setDbName(targetDb);
  org.apache.hadoop.hive.metastore.api.Partition thriftPartition = target.getTPartition();
  thriftPartition.setDbName(targetDb);
  // Record that this copy was registered by Gobblin distcp, and when the copy started.
  thriftPartition.putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
  thriftPartition.putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
    Long.toString(this.hiveCopyEntityHelper.getStartTime()));
  target.setLocation(targetLocation.toString());
  // The copy must not inherit the origin partition's create time.
  thriftPartition.unsetCreateTime();
  return target;
 } catch (HiveException he) {
  throw new IOException(he);
 }
}

代码示例来源:origin: apache/hive

/**
 * Serializes this partition's Thrift object as JSON into {@code writer}.
 * Repl-internal checkpoint entries are stripped from the parameters first;
 * for a bootstrap replication dump (i.e. not INCREMENTAL_DUMP) the current
 * replication state id is stamped onto the partition before serialization.
 *
 * @throws SemanticException if Thrift serialization fails (cause preserved)
 */
@Override
public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
    throws SemanticException, IOException {
 TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
 try {
  // Remove all the entries from the parameters which are added by repl tasks internally.
  Map<String, String> parameters = partition.getParameters();
  if (parameters != null) {
   parameters.entrySet()
       .removeIf(e -> e.getKey().equals(ReplUtils.REPL_CHECKPOINT_KEY));
  }
  if (additionalPropertiesProvider.isInReplicationScope()) {
   // Current replication state must be set on the Partition object only for bootstrap dump.
   // Event replication State will be null in case of bootstrap dump.
   if (additionalPropertiesProvider.getReplSpecType()
       != ReplicationSpec.Type.INCREMENTAL_DUMP) {
    partition.putToParameters(
        ReplicationSpec.KEY.CURR_STATE_ID.toString(),
        additionalPropertiesProvider.getCurrentReplicationState());
   }
  }
  writer.jsonGenerator.writeString(serializer.toString(partition, UTF_8));
  writer.jsonGenerator.flush();
 } catch (TException e) {
  // Surface Thrift failures as a semantic error, keeping the original cause.
  throw new SemanticException(ErrorMsg.ERROR_SERIALIZE_METASTORE.getMsg(), e);
 }
}

代码示例来源:origin: apache/hive

tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
 Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
  .currentTimeMillis() / 1000));

代码示例来源:origin: apache/hive

/**
 * Aligns the locally-built {@code part} with the server-side copy fetched
 * from the metastore. Over the Thrift client the server assigns the create
 * time, so it is copied back and mirrored into the DDL_TIME parameter; the
 * write id is synced unconditionally.
 */
private static void adjust(HiveMetaStoreClient client, Partition part,
    String dbName, String tblName, boolean isThriftClient) throws TException {
 Partition fetched = client.getPartition(dbName, tblName, part.getValues());
 if (isThriftClient) {
  int createTime = fetched.getCreateTime();
  part.setCreateTime(createTime);
  part.putToParameters(
      org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME,
      Long.toString(createTime));
 }
 part.setWriteId(fetched.getWriteId());
}

代码示例来源:origin: apache/hive

new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
 Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
  .currentTimeMillis() / 1000));

代码示例来源:origin: apache/drill

Partition tptn = partition.getTPartition();
if (replicationSpec.isInReplicationScope()){
 tptn.putToParameters(
   ReplicationSpec.KEY.CURR_STATE_ID.toString(), replicationSpec.getCurrentReplicationState());
 if ((tptn.getParameters().containsKey("EXTERNAL")) &&
   (tptn.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE"))){
  tptn.putToParameters("EXTERNAL", "FALSE");

代码示例来源:origin: apache/hive

part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));

代码示例来源:origin: org.spark-project.hive/hive-metastore

// Row-applier callback: stores one parameter row (key at fields[1],
// String value at fields[2]) into the Partition's parameter map.
@Override
public void apply(Partition t, Object[] fields) {
 t.putToParameters((String)fields[1], (String)fields[2]);
}});

代码示例来源:origin: com.facebook.presto.hive/hive-apache

// Row-applier callback: stores one parameter row (key at fields[1],
// String value at fields[2]) into the Partition's parameter map.
@Override
public void apply(Partition t, Object[] fields) {
 t.putToParameters((String)fields[1], (String)fields[2]);
}});

代码示例来源:origin: org.spark-project.hive/hive-metastore

// Delegates the parameter update to the wrapped partition.
@Override public void putToParameters(String key, String value) { partition.putToParameters(key, value);}
// Exposes the wrapped partition's storage-descriptor location.
@Override public String getLocation() { return partition.getSd().getLocation(); }

代码示例来源:origin: edu.berkeley.cs.shark/hive-metastore

// Row-applier callback (older variant, no @Override): copies one
// key/value parameter row onto the partition.
public void apply(Partition t, Object[] fields) {
 t.putToParameters((String)fields[1], (String)fields[2]);
}});

代码示例来源:origin: com.facebook.presto.hive/hive-apache

// Delegates the parameter update to the wrapped partition.
@Override public void putToParameters(String key, String value) { partition.putToParameters(key, value);}
// Exposes the wrapped partition's storage-descriptor location.
@Override public String getLocation() { return partition.getSd().getLocation(); }

代码示例来源:origin: org.spark-project.hive/hive-metastore

// Routes the parameter update to the partition currently selected by index.
@Override
public void putToParameters(String key, String value) {
 partitionList.get(index).putToParameters(key, value);
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

// Routes the parameter update to the partition currently selected by index.
@Override
public void putToParameters(String key, String value) {
 partitionList.get(index).putToParameters(key, value);
}

代码示例来源:origin: org.apache.hive/hive-standalone-metastore

// Routes the parameter update to the partition currently selected by index.
@Override
public void putToParameters(String key, String value) {
 partitionList.get(index).putToParameters(key, value);
}

代码示例来源:origin: com.hotels/circus-train-avro

/**
 * Rewrites the partition's Avro schema URL parameter so it points at a copy
 * of the schema placed under {@code avroSchemaDestination}/{@code eventId}.
 * Returns the partition unchanged when no destination is configured.
 *
 * @param partition the Hive partition whose AVRO_SCHEMA_URL_PARAMETER is rewritten
 * @param avroSchemaDestination base URI to copy the schema under; may be null
 * @param eventId unique event id appended to the destination path
 * @throws Exception if copying the schema file fails
 */
Partition apply(Partition partition, String avroSchemaDestination, String eventId) throws Exception {
  if (avroSchemaDestination == null) {
   return partition;
  }

  // Build the per-event destination in a local instead of reassigning the parameter.
  String destination = addTrailingSlash(avroSchemaDestination) + eventId;

  String avroSchemaSource = partition.getParameters().get(AVRO_SCHEMA_URL_PARAMETER);
  copy(avroSchemaSource, destination);

  partition.putToParameters(AVRO_SCHEMA_URL_PARAMETER,
    destination + "/" + getAvroSchemaFileName(avroSchemaSource));
  // SLF4J formats arguments lazily; pass the object rather than calling toString() eagerly.
  LOG.info("Avro SerDe transformation has been applied to partition '{}'", partition);
  return partition;
 }
}

相关文章

微信公众号

最新文章

更多

Partition类方法