Usage of the org.apache.hadoop.io.Text.toString() method, with code examples


This article collects Java code examples of the org.apache.hadoop.io.Text.toString() method and shows how it is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, and Maven, and are intended as practical references. Details of Text.toString() are as follows:
Package: org.apache.hadoop.io
Class: Text
Method: toString

About Text.toString

Convert text back to string: the method decodes the UTF-8 bytes held by the Text instance into a java.lang.String.
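Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) showing the round trip between java.lang.String and Text; the class name TextToStringDemo is only illustrative:

import org.apache.hadoop.io.Text;

public class TextToStringDemo {
  public static void main(String[] args) {
    // A Text stores its contents as UTF-8 bytes.
    Text text = new Text("hello hadoop");

    // toString() decodes those bytes back into a java.lang.String.
    String decoded = text.toString();
    System.out.println(decoded); // prints "hello hadoop"

    // Text is mutable: set() replaces the contents in place,
    // and toString() reflects the new value.
    text.set("hadoop text");
    System.out.println(text.toString()); // prints "hadoop text"
  }
}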

Code examples

Example source: apache/hive

public Text evaluate(Text s, Text search, Text replacement) {
  if (s == null || search == null || replacement == null) {
   return null;
  }
  String r = s.toString().replace(search.toString(), replacement.toString());
  result.set(r);
  return result;
 }
}

Example source: Alluxio/alluxio

/**
  * Merges the IP addresses of the same Status.
  */
 @Override
 protected void reduce(Text key, Iterable<Text> values, Context context)
   throws IOException, InterruptedException {
  Set<String> addressSet = new HashSet<>();
  for (Text val : values) {
   addressSet.add(val.toString());
  }
  context.write(key, new Text(String.join(" ", addressSet)));
 }
}

Example source: org.apache.hadoop/hadoop-common

@Override
 public String toString() {
  StringBuilder sb = new StringBuilder();
  sb.append("size: ").append(this.theMetadata.size()).append("\n");
  Iterator<Map.Entry<Text, Text>> iter =
   this.theMetadata.entrySet().iterator();
  while (iter.hasNext()) {
   Map.Entry<Text, Text> en = iter.next();
   sb.append("\t").append(en.getKey().toString()).append("\t").append(en.getValue().toString());
   sb.append("\n");
  }
  return sb.toString();
 }
}

Example source: apache/flink

@Override
public void map(final IntWritable k, final Text v,
    final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
  out.collect(k, v);
  out.collect(k, new Text(v.toString().toUpperCase()));
}

Example source: apache/hive

// Copy the raw bytes of the BytesWritable into a Text and append its String form.
Text txt = new Text();
txt.set(bw.getBytes(), 0, bw.getLength());
sb.append(txt.toString());
break;
// Elsewhere in the same serializer: emit a map entry as key:value JSON.
buildJSONString(sb, e.getKey(), mapKeyObjectInspector, JSON_NULL);
sb.append(COLON);
buildJSONString(sb, e.getValue(), mapValueObjectInspector, JSON_NULL);

Example source: apache/flink

@Override
public void map(IntWritable k, Text v, OutputCollector<IntWritable, Text> out, Reporter r)
    throws IOException {
  if (v.toString().startsWith(filterPrefix)) {
    out.collect(k, v);
  }
}

Example source: apache/ignite

/** {@inheritDoc} */
@Override public void map(LongWritable key, Text val, OutputCollector<Text, IntWritable> output, Reporter reporter)
    throws IOException {
  assert wasConfigured : "Mapper should be configured";
  String line = val.toString();
  StringTokenizer tokenizer = new StringTokenizer(line);
  while (tokenizer.hasMoreTokens()) {
    word.set(tokenizer.nextToken());
    output.collect(word, one);
  }
  HadoopErrorSimulator.instance().onMap();
}

Example source: prestodb/presto

/**
 * Gets a set of locality groups that should be added to the index table (not the metrics table).
 *
 * @param table Table for the locality groups, see AccumuloClient#getTable
 * @return Mapping of locality group to column families in the locality group, 1:1 mapping in
 * this case
 */
public static Map<String, Set<Text>> getLocalityGroups(AccumuloTable table)
{
  Map<String, Set<Text>> groups = new HashMap<>();
  // For each indexed column
  for (AccumuloColumnHandle columnHandle : table.getColumns().stream().filter(AccumuloColumnHandle::isIndexed).collect(Collectors.toList())) {
    // Create a Text version of the index column family
    Text indexColumnFamily = new Text(getIndexColumnFamily(columnHandle.getFamily().get().getBytes(UTF_8), columnHandle.getQualifier().get().getBytes(UTF_8)).array());
    // Add this to the locality groups,
    // it is a 1:1 mapping of locality group to column families
    groups.put(indexColumnFamily.toString(), ImmutableSet.of(indexColumnFamily));
  }
  return groups;
}

Example source: apache/flink

@Override
public void map(LongWritable k, Text v, OutputCollector<Text, LongWritable> out, Reporter rep)
    throws IOException {
  // normalize and split the line
  String line = v.toString();
  String[] tokens = line.toLowerCase().split("\\W+");
  // emit the pairs
  for (String token : tokens) {
    if (token.length() > 0) {
      out.collect(new Text(token), new LongWritable(1L));
    }
  }
}

Example source: apache/hive

@Override
 public Text evaluate(Text s) {
  if (s == null) {
   return null;
  }
  t.set(s.toString().toUpperCase());
  return t;
 }
});

Example source: apache/hive

// Copy the raw bytes into a Text, escape them, and append as a quoted JSON string.
Text txt = new Text();
txt.set(b, 0, b.length);
appendWithQuotes(sb, SerDeUtils.escapeString(txt.toString()));
break;
case DATE:
// Elsewhere in the same serializer: render the map key and quote it if it is not already quoted.
buildJSONString(keyBuilder, e.getKey(), mapKeyObjectInspector);
String keyString = keyBuilder.toString().trim();
if ((!keyString.isEmpty()) && (keyString.charAt(0) != SerDeUtils.QUOTE)) {
buildJSONString(sb, e.getValue(), mapValueObjectInspector);

Example source: voldemort/voldemort

/**
 * Read the metadata from a hadoop SequenceFile
 * 
 * @param fs The filesystem to read from
 * @param path The file to read from
 * @return The metadata from this file
 */
public static Map<String, String> getMetadataFromSequenceFile(FileSystem fs, Path path) {
  try {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 4096);
    // Open the reader with the configured buffer size.
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
    SequenceFile.Metadata meta = reader.getMetadata();
    reader.close();
    TreeMap<Text, Text> map = meta.getMetadata();
    Map<String, String> values = new HashMap<String, String>();
    for(Map.Entry<Text, Text> entry: map.entrySet())
      values.put(entry.getKey().toString(), entry.getValue().toString());
    return values;
  } catch(IOException e) {
    throw new RuntimeException(e);
  }
}

Example source: apache/flink

@Override
public void map(final IntWritable k, final Text v,
    final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
  if (v.toString().contains("bananas")) {
    out.collect(k, v);
  }
}

Example source: apache/incubator-pinot

@Override
protected void map(LongWritable key, Text value, Context context)
  throws IOException, InterruptedException {
 String line = value.toString();
 String[] lineSplits = line.split(" ");
 context.write(new LongWritable(Long.parseLong(lineSplits[2])), new Text(
   FileSystem.get(_properties).listStatus(new Path(_localHdfsSegmentTarPath + "/"))[0].getPath().getName()));
 LOGGER.info("Finished the job successfully");
}

Example source: intel-hadoop/HiBench

public void reduce (final IntWritable key, final Iterator<Text> values, final OutputCollector<IntWritable, Text> output, final Reporter reporter) throws IOException
  {
    while (values.hasNext()) {
      String cur_val = values.next().toString();
      output.collect( key, new Text( cur_val ) );
    }
  }
}

Example source: apache/hive

@Override
 public Text evaluate(Text s) {
  if (s == null) {
   return null;
  }
  t.set(s.toString().toLowerCase());
  return t;
 }
});

Example source: intel-hadoop/HiBench

public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
    StringTokenizer tokens = new StringTokenizer(value.toString(), " \t\n\r\f%");
    String attr = tokens.nextToken(); 
    if (attr.endsWith(":tput_samples")){
      String[] tags=attr.split(":");
      String[] samples = tokens.nextToken().split(";");
      for(int j=0; !samples[j].startsWith("EoR"); j++){
        t.set(samples[j]);
        context.write(new Text(tags[1]), t);
      }
    }
  }
}

Example source: apache/ignite

/** {@inheritDoc} */
@Override protected void map(LongWritable key, Text val, Context ctx)
    throws IOException, InterruptedException {
  // X.printerrln("___ map: " + val);
  ctx.write(UUID.fromString(val.toString()), NullWritable.get());
}

Example source: apache/flink

@Override
public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int commentCnt = 0;
  while (vs.hasNext()) {
    String v = vs.next().toString();
    if (v.startsWith(this.countPrefix)) {
      commentCnt++;
    }
  }
  out.collect(k, new IntWritable(commentCnt));
}

Example source: brianfrankcooper/YCSB

@Override
public Status read(String table, String key, Set<String> fields,
          Map<String, ByteIterator> result) {
 Scanner scanner = null;
 try {
  scanner = getRow(table, new Text(key), null);
  // Pick out the results we care about.
  final Text cq = new Text();
  for (Entry<Key, Value> entry : scanner) {
   entry.getKey().getColumnQualifier(cq);
   Value v = entry.getValue();
   byte[] buf = v.get();
   result.put(cq.toString(),
     new ByteArrayByteIterator(buf));
  }
 } catch (Exception e) {
  System.err.println("Error trying to read Accumulo table " + table + " " + key);
  e.printStackTrace();
  return Status.ERROR;
 } finally {
  if (null != scanner) {
   scanner.close();
  }
 }
 return Status.OK;
}
