Usage of org.apache.hadoop.io.Text.readFields() with Code Examples


This article collects Java code examples of the org.apache.hadoop.io.Text.readFields() method, showing how Text.readFields() is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Text.readFields() are as follows:
Package: org.apache.hadoop.io
Class: Text
Method: readFields

About Text.readFields

Deserializes the object: readFields(DataInput) reads the serialized form from the input stream and restores this instance's contents in place. It is the deserialization half of Hadoop's Writable contract, paired with write(DataOutput).
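
A minimal, self-contained round trip illustrates the contract (an illustrative sketch, not taken from any of the projects below). On the wire, Text is encoded as a vint length prefix followed by the string's UTF-8 bytes.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class TextReadFieldsDemo {
 public static void main(String[] args) throws IOException {
  // Serialize: Text.write() emits a vint length followed by the UTF-8 bytes.
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  new Text("hello, writable").write(new DataOutputStream(bytes));

  // Deserialize: readFields() overwrites the receiver's contents in place,
  // so a single Text instance can be reused across many records.
  Text restored = new Text();
  restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
  System.out.println(restored); // prints: hello, writable
 }
}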

Code Examples

Code example source: apache/hive

public void readFields(DataInput in) throws IOException {
 value.readFields(in);
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public void readFields(DataInput in) throws IOException {
 // entry count first, then that many key/value Text pairs
 int sz = in.readInt();
 if (sz < 0) throw new IOException("Invalid size: " + sz + " for file metadata object");
 this.theMetadata = new TreeMap<Text, Text>();
 for (int i = 0; i < sz; i++) {
  Text key = new Text();
  Text val = new Text();
  key.readFields(in);
  val.readFields(in);
  this.theMetadata.put(key, val);
 }
}
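
For context, the matching write() side of such a map would look like the fragment below (an illustrative sketch reconstructed from the read format above, not copied from hadoop-common): an int count followed by the key/value Text pairs.

public void write(DataOutput out) throws IOException {
 // entry count, then each key/value Text pair, mirroring readFields() above
 out.writeInt(theMetadata.size());
 for (Map.Entry<Text, Text> e : theMetadata.entrySet()) {
  e.getKey().write(out);
  e.getValue().write(out);
 }
}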

Code example source: apache/kylin

@Override
public void readFields(DataInput dataInput) throws IOException {
  this.typeId = dataInput.readByte();
  Text inputKey = new Text();
  inputKey.readFields(dataInput);
  init(inputKey, typeId);
}

Code example source: apache/incubator-gobblin

@Override
public void readFields(DataInput in) throws IOException {
 Text text = new Text();
 text.readFields(in);
 this.jobId = text.toString().intern();
 text.readFields(in);
 this.taskId = text.toString().intern();
 this.setId(this.taskId);
 this.startTime = in.readLong();
 this.endTime = in.readLong();
 this.duration = in.readLong();
 super.readFields(in);
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public void readFields(DataInput in) throws IOException {
 int len = WritableUtils.readVInt(in);
 if (identifier == null || identifier.length != len) {
  identifier = new byte[len];
 }
 in.readFully(identifier);
 len = WritableUtils.readVInt(in);
 if (password == null || password.length != len) {
  password = new byte[len];
 }
 in.readFully(password);
 kind.readFields(in);
 service.readFields(in);
}

Code example source: org.apache.hadoop/hadoop-common

@Override
public void readFields(DataInput in) throws IOException {
 byte version = in.readByte();
 if (version != VERSION) {
  throw new IOException("Unknown version of delegation token " + version);
 }
 owner.readFields(in, Text.DEFAULT_MAX_LEN);
 renewer.readFields(in, Text.DEFAULT_MAX_LEN);
 realUser.readFields(in, Text.DEFAULT_MAX_LEN);
 issueDate = WritableUtils.readVLong(in);
 maxDate = WritableUtils.readVLong(in);
 sequenceNumber = WritableUtils.readVInt(in);
 masterKeyId = WritableUtils.readVInt(in);
}
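
Note the two-argument overload used above: readFields(DataInput, int) caps the decoded length, so a corrupt or malicious vint length prefix raises an IOException instead of triggering a huge allocation (Text.DEFAULT_MAX_LEN is 1 MB). A minimal fragment showing the same defensive pattern (illustrative; the variable name and message are hypothetical):

Text owner = new Text();
try {
 // rejects any length prefix larger than Text.DEFAULT_MAX_LEN
 owner.readFields(in, Text.DEFAULT_MAX_LEN);
} catch (IOException e) {
 throw new IOException("Malformed token: bad owner field", e);
}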

Code example source: apache/incubator-gobblin

@Override
public void readFields(DataInput in)
  throws IOException {
 Text text = new Text();
 text.readFields(in);
 this.jobName = text.toString().intern();
 text.readFields(in);
 this.jobId = text.toString().intern();
 this.setId(this.jobId);
 this.startTime = in.readLong();
 this.endTime = in.readLong();
 this.duration = in.readLong();
 text.readFields(in);
 this.state = RunningState.valueOf(text.toString());
 this.taskCount = in.readInt();
 int numTaskStates = in.readInt();
 getTaskStateWithCommonAndSpecWuProps(numTaskStates, in);
 super.readFields(in);
}

Code example source: apache/hive

@Override
public void readFields(DataInput dataInput) throws IOException {
 data = new Text();
 data.readFields(dataInput);
 boolean notNull = dataInput.readBoolean();
 if (notNull) {
  partVal = new Text();
  partVal.readFields(dataInput);
 }
}

Code example source: apache/incubator-gobblin

// fragment: instantiate a fresh state object, then deserialize the key and state from the stream
T state = this.stateClass.newInstance();
key.readFields(dis);
state.readFields(dis);
states.add(state);

Code example source: org.apache.hadoop/hadoop-common

/**
 * Loads all the tokens and secret keys.
 * @param in input to deserialize from
 * @throws IOException if the serialized form is malformed
 */
@Override
public void readFields(DataInput in) throws IOException {
 secretKeysMap.clear();
 tokenMap.clear();
 int size = WritableUtils.readVInt(in);
 for(int i=0; i<size; i++) {
  Text alias = new Text();
  alias.readFields(in);
  Token<? extends TokenIdentifier> t = new Token<TokenIdentifier>();
  t.readFields(in);
  tokenMap.put(alias, t);
 }
 size = WritableUtils.readVInt(in);
 for(int i=0; i<size; i++) {
  Text alias = new Text();
  alias.readFields(in);
  int len = WritableUtils.readVInt(in);
  byte[] value = new byte[len];
  in.readFully(value);
  secretKeysMap.put(alias, value);
 }
}

Code example source: apache/hive

@Override
public void readFields(DataInput dataInput) throws IOException {
 data = new Text();
 data.readFields(dataInput);
 boolean notNull = dataInput.readBoolean();
 if (notNull) {
  partVal = new Text();
  partVal.readFields(dataInput);
 }
 notNull = dataInput.readBoolean();
 if (notNull) {
  recId = new RecordIdentifier();
  recId.readFields(dataInput);
 }
}

Code example source: apache/incubator-gobblin

@Test
public void testSerialize()
  throws IOException {
 // Use our serializer, verify Hadoop deserializer can read it back
 for (String textToSerialize : textsToSerialize) {
  ByteArrayOutputStream bOs = new ByteArrayOutputStream();
  DataOutputStream dataOutputStream = new DataOutputStream(bOs);
  TextSerializer.writeStringAsText(dataOutputStream, textToSerialize);
  dataOutputStream.close();
  ByteArrayInputStream bIn = new ByteArrayInputStream(bOs.toByteArray());
  DataInputStream dataInputStream = new DataInputStream(bIn);
  Text hadoopText = new Text();
  hadoopText.readFields(dataInputStream);
  Assert.assertEquals(hadoopText.toString(), textToSerialize);
 }
}

Code example source: apache/hive

@Override
public void readFields(DataInput dataInput) throws IOException {
 boolean notNull = dataInput.readBoolean();
 if (notNull) {
  partVal = new Text();
  partVal.readFields(dataInput);
 }
 notNull = dataInput.readBoolean();
 if (notNull) {
  recId = new RecordIdentifier();
  recId.readFields(dataInput);
 }
}

Code example source: mahmoudparsian/data-algorithms-book

@Override
public void readFields(DataInput in) throws IOException {
  yearMonth.readFields(in);
  day.readFields(in);
  temperature.readFields(in);
}

Code example source: apache/accumulo

@Override
public void readFields(DataInput in) throws IOException {
 Text tid = new Text();
 tid.readFields(in);
 setTableId(Table.ID.of(tid.toString()));
 boolean hasRow = in.readBoolean();
 if (hasRow) {
  Text er = new Text();
  er.readFields(in);
  setEndRow(er, false, false);
 } else {
  setEndRow(null, false, false);
 }
 boolean hasPrevRow = in.readBoolean();
 if (hasPrevRow) {
  Text per = new Text();
  per.readFields(in);
  setPrevEndRow(per, false, true);
 } else {
  setPrevEndRow(null);
 }
 hashCode = 0;
 check();
}

Code example source: apache/accumulo

// fragment: read a column family, plus an optional column qualifier
long numPairs = dataInput.readInt();
Text colFam = new Text();
colFam.readFields(dataInput);
if (numPairs == 1) {
 columns.add(new Pair<>(colFam, null));
} else if (numPairs == 2) {
 Text colQual = new Text();
 colQual.readFields(dataInput);
 columns.add(new Pair<>(colFam, colQual));
}

Code example source: apache/accumulo

private static LgSummaries readLGroup(DataInputStream in, String[] symbols) throws IOException {
 String lgroupName = in.readUTF();
 // read first row
 Text firstRow = new Text();
 firstRow.readFields(in);
 // read summaries
 int numSummaries = WritableUtils.readVInt(in);
 SummaryInfo[] summaries = new SummaryInfo[numSummaries];
 for (int i = 0; i < numSummaries; i++) {
  int rowLen = WritableUtils.readVInt(in);
  byte[] row = new byte[rowLen];
  in.readFully(row);
  int count = WritableUtils.readVInt(in);
  Map<String,Long> summary = readSummary(in, symbols);
  summaries[i] = new SummaryInfo(row, summary, count);
 }
 return new LgSummaries(firstRow, summaries, lgroupName);
}

Code example source: org.apache.hadoop/hadoop-mapreduce-client-core

private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
  List<String> keys, List<String> values) throws IOException {
 FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
 IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
   fs.getFileStatus(path).getLen(), null, null);
 DataInputBuffer keyBuff = new DataInputBuffer();
 DataInputBuffer valueBuff = new DataInputBuffer();
 Text key = new Text();
 Text value = new Text();
 while (reader.nextRawKey(keyBuff)) {
  key.readFields(keyBuff);
  keys.add(key.toString());
  reader.nextRawValue(valueBuff);
  value.readFields(valueBuff);
  values.add(value.toString());
 }
}

Code example source: io.hops/hadoop-mapreduce-client-core

/** {@inheritDoc} */
@Override
public void readFields(DataInput in) throws IOException {
 jobid.readFields(in);
}
