org.apache.hadoop.io.file.tfile.Utils类的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(5.8k)|赞(0)|评价(0)|浏览(91)

本文整理了Java中org.apache.hadoop.io.file.tfile.Utils类的一些代码示例,展示了Utils类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utils类的具体详情如下:
包路径:org.apache.hadoop.io.file.tfile.Utils
类名称:Utils

Utils介绍

[英]Supporting Utility classes used by TFile, and shared by users of TFile.
[中]支持由TFile使用并由TFile用户共享的实用程序类。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Deserializes TFile metadata from the given stream.
 *
 * Reads, in on-disk order: the format version, the record count
 * (VLong), and the comparator class name (string), then materializes
 * the comparator from that name.
 *
 * @param in source stream positioned at the start of the meta block.
 * @throws IOException if an I/O error occurs while reading.
 * @throws RuntimeException if the stored version is not compatible
 *           with {@code TFile.API_VERSION}.
 */
public TFileMeta(DataInput in) throws IOException {
 version = new Version(in);
 if (!version.compatibleWith(TFile.API_VERSION)) {
  throw new RuntimeException("Incompatible TFile fileVersion.");
 }
 recordCount = Utils.readVLong(in);
 strComparator = Utils.readString(in);
 comparator = makeComparator(strComparator);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Serializes this TFile metadata: the API version, the record count
 * (VLong), and the comparator name (string) — in the same order the
 * reading constructor consumes them.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
 TFile.API_VERSION.write(out);
 Utils.writeVLong(out, recordCount);
 Utils.writeString(out, strComparator);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * @param key
 *          input key.
 * @return the ID of the first block that contains key >= input key. Or -1
 *         if no such block exists.
 */
public int lowerBound(RawComparable key) {
 // A comparator is required: binary search is meaningless on an
 // unsorted TFile.
 if (comparator == null) {
  throw new RuntimeException("Cannot search in unsorted TFile");
 }
 // No first key means the index is empty — nothing can match.
 if (firstKey == null) {
  return -1; // not found
 }
 int pos = Utils.lowerBound(index, key, comparator);
 // A position past the last entry means every block key < input key.
 return (pos == index.size()) ? -1 : pos;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Deserializes one index entry: a VInt key length, the raw key bytes,
 * then the number of key-value entries as a VLong.
 *
 * @param in source stream positioned at the start of the entry.
 * @throws IOException if an I/O error occurs while reading.
 */
public TFileIndexEntry(DataInput in) throws IOException {
 int len = Utils.readVInt(in);
 key = new byte[len];
 in.readFully(key, 0, len);
 kvEntries = Utils.readVLong(in);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Serializes this index entry in the layout the reading constructor
 * expects: VInt key length, raw key bytes, VLong entry count.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
  Utils.writeVInt(out, key.length);
  out.write(key, 0, key.length);
  Utils.writeVLong(out, kvEntries);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Deserializes the data-block index: the default compression
 * algorithm name, a VInt region count, then that many block regions.
 *
 * @param in source stream positioned at the start of the data index.
 * @throws IOException if an I/O error occurs while reading.
 */
public DataIndex(DataInput in) throws IOException {
 defaultCompressionAlgorithm =
   Compression.getCompressionAlgorithmByName(Utils.readString(in));
 int n = Utils.readVInt(in);
 // Presize the list: the region count is known up front.
 listRegions = new ArrayList<BlockRegion>(n);
 for (int i = 0; i < n; i++) {
  BlockRegion region = new BlockRegion(in);
  listRegions.add(region);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Serializes the data-block index in the layout the reading
 * constructor expects: compression algorithm name, VInt region
 * count, then each block region in order.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
  Utils.writeString(out, defaultCompressionAlgorithm.getName());
  Utils.writeVInt(out, listRegions.size());
  for (BlockRegion region : listRegions) {
   region.write(out);
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * @param key
 *          input key.
 * @return the ID of the first block that contains key > input key. Or -1
 *         if no such block exists.
 */
public int upperBound(RawComparable key) {
 // A comparator is required: binary search is meaningless on an
 // unsorted TFile.
 if (comparator == null) {
  throw new RuntimeException("Cannot search in unsorted TFile");
 }
 // No first key means the index is empty — nothing can match.
 if (firstKey == null) {
  return -1; // not found
 }
 int pos = Utils.upperBound(index, key, comparator);
 // A position past the last entry means no block key > input key.
 return (pos == index.size()) ? -1 : pos;
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Serializes this block region as three VLongs: file offset,
 * compressed size, and raw (uncompressed) size.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
 Utils.writeVLong(out, offset);
 Utils.writeVLong(out, compressedSize);
 Utils.writeVLong(out, rawSize);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Deserializes a block region: file offset, compressed size, and raw
 * (uncompressed) size — the same three VLongs {@code write} emits.
 *
 * @param in source stream positioned at the start of the region.
 * @throws IOException if an I/O error occurs while reading.
 */
public BlockRegion(DataInput in) throws IOException {
 offset = Utils.readVLong(in);
 compressedSize = Utils.readVLong(in);
 rawSize = Utils.readVLong(in);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Constructor.
 * 
 * @param out
 *          the underlying output stream.
 * @param size
 *          The total # of bytes to be written as a single chunk.
 * @throws java.io.IOException
 *           if an I/O error occurs.
 */
public SingleChunkEncoder(DataOutputStream out, int size)
  throws IOException {
 this.out = out;
 this.remain = size;
 // Emit the chunk length up front so the decoder knows how many
 // bytes follow; `remain` tracks how many may still be written.
 Utils.writeVInt(out, size);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Reading the length of next chunk.
 * 
 * @throws java.io.IOException
 *           when no more data is available.
 */
private void readLength() throws IOException {
 // Encoding convention observed here: a non-negative length marks
 // the final chunk; a negative length is an intermediate chunk whose
 // size is the absolute value.
 int encodedLength = Utils.readVInt(in);
 if (encodedLength < 0) {
  remain = -encodedLength;
 } else {
  remain = encodedLength;
  lastChunk = true;
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Serializes this meta-index entry: the prefixed meta-block name,
 * the compression algorithm name, then the block region.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
  Utils.writeString(out, defaultPrefix + metaName);
  Utils.writeString(out, compressionAlgorithm.getName());
  region.write(out);
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Deserializes one meta-index entry: a prefixed meta-block name, the
 * compression algorithm name, and the block region it points to.
 *
 * @param in source stream positioned at the start of the entry.
 * @throws IOException if an I/O error occurs while reading, or if
 *           the stored name lacks the expected prefix (corrupted
 *           meta region index).
 */
public MetaIndexEntry(DataInput in) throws IOException {
 String fullMetaName = Utils.readString(in);
 // Fail fast on a name without the mandatory prefix.
 if (!fullMetaName.startsWith(defaultPrefix)) {
  throw new IOException("Corrupted Meta region Index");
 }
 // One-arg substring is equivalent to substring(begin, length()).
 metaName = fullMetaName.substring(defaultPrefix.length());
 compressionAlgorithm =
   Compression.getCompressionAlgorithmByName(Utils.readString(in));
 region = new BlockRegion(in);
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Deserializes one index entry: a VInt key length, the raw key bytes,
 * then the number of key-value entries as a VLong.
 *
 * @param in source stream positioned at the start of the entry.
 * @throws IOException if an I/O error occurs while reading.
 */
public TFileIndexEntry(DataInput in) throws IOException {
 int len = Utils.readVInt(in);
 key = new byte[len];
 in.readFully(key, 0, len);
 kvEntries = Utils.readVLong(in);
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Serializes this index entry in the layout the reading constructor
 * expects: VInt key length, raw key bytes, VLong entry count.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
  Utils.writeVInt(out, key.length);
  out.write(key, 0, key.length);
  Utils.writeVLong(out, kvEntries);
 }
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Deserializes the data-block index: the default compression
 * algorithm name, a VInt region count, then that many block regions.
 *
 * @param in source stream positioned at the start of the data index.
 * @throws IOException if an I/O error occurs while reading.
 */
public DataIndex(DataInput in) throws IOException {
 defaultCompressionAlgorithm =
   Compression.getCompressionAlgorithmByName(Utils.readString(in));
 int n = Utils.readVInt(in);
 // Presize the list: the region count is known up front.
 listRegions = new ArrayList<BlockRegion>(n);
 for (int i = 0; i < n; i++) {
  BlockRegion region = new BlockRegion(in);
  listRegions.add(region);
 }
}

代码示例来源:origin: com.facebook.hadoop/hadoop-core

/**
 * Serializes the data-block index in the layout the reading
 * constructor expects: compression algorithm name, VInt region
 * count, then each block region in order.
 *
 * @param out destination stream.
 * @throws IOException if an I/O error occurs while writing.
 */
public void write(DataOutput out) throws IOException {
  Utils.writeString(out, defaultCompressionAlgorithm.getName());
  Utils.writeVInt(out, listRegions.size());
  for (BlockRegion region : listRegions) {
   region.write(out);
  }
 }
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Maps an absolute record number to a (block index, offset within
 * block) location.
 *
 * @param recNum absolute record number.
 * @return the location of that record.
 */
public Reader.Location getLocationByRecordNum(long recNum) {
 // Index of the block holding recNum (first entry > recNum).
 final int blockIdx = Utils.upperBound(recordNumIndex, recNum);
 // Records preceding this block — presumably recordNumIndex holds
 // cumulative counts; zero when recNum falls in the first block.
 long precedingRecords = 0;
 if (blockIdx != 0) {
  precedingRecords = recordNumIndex.get(blockIdx - 1);
 }
 return new Reader.Location(blockIdx, recNum - precedingRecords);
}

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Encoding an integer into a variable-length encoding format. Synonymous to
 * <code>Utils#writeVLong(out, n)</code>.
 * 
 * @param out
 *          output stream
 * @param n
 *          The integer to be encoded
 * @throws IOException
 *           if an I/O error occurs while writing to the stream.
 * @see Utils#writeVLong(DataOutput, long)
 */
public static void writeVInt(DataOutput out, int n) throws IOException {
 // Delegates to the VLong encoder; an int fits losslessly in a long.
 writeVLong(out, n);
}

相关文章