org.apache.hadoop.mapred.InputSplit.getLocations()方法的使用及代码示例

x33g5p2x  于2022-01-21 转载在 其他  
字(6.0k)|赞(0)|评价(0)|浏览(121)

本文整理了Java中org.apache.hadoop.mapred.InputSplit.getLocations()方法的一些代码示例,展示了InputSplit.getLocations()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。InputSplit.getLocations()方法的具体详情如下:
包路径:org.apache.hadoop.mapred.InputSplit
类名称:InputSplit
方法名:getLocations

InputSplit.getLocations介绍

[英]Get the list of hostnames where the input split is located.
[中]获取输入拆分所在的主机名列表。

代码示例

代码示例来源:origin: apache/flink

/**
 * Returns the hostnames where the wrapped Hadoop input split resides.
 * A failing lookup degrades to an empty array (no locality preference).
 */
@Override
public String[] getHostnames() {
	String[] hosts;
	try {
		hosts = this.hadoopInputSplit.getLocations();
	} catch (IOException ignored) {
		// Best effort only: treat an unreadable split as "no known hosts".
		hosts = new String[0];
	}
	return hosts;
}

代码示例来源:origin: apache/hive

/**
 * Reports locality of the first wrapped split; the wrapper assumes all
 * contained splits share the same placement.
 */
@Override
public String[] getLocations() throws IOException {
 // Guard only fires with -ea; production relies on callers populating inputSplits.
 assert inputSplits != null && inputSplits.length > 0;
 InputSplit first = inputSplits[0];
 return first.getLocations();
}

代码示例来源:origin: apache/hive

/** Pure pass-through to the wrapped split's preferred host list. */
@Override
public String[] getLocations() throws IOException {
 String[] hosts = inputSplit.getLocations();
 return hosts;
}

代码示例来源:origin: apache/drill

/**
 * Delegates locality to the leading split — every wrapped split is
 * expected to live on the same set of hosts.
 */
@Override
public String[] getLocations() throws IOException {
 // Enforced only when assertions are enabled.
 assert inputSplits != null && inputSplits.length > 0;
 return inputSplits[0].getLocations();
}

代码示例来源:origin: apache/drill

/** Forwards the locality query to the underlying split. */
@Override
public String[] getLocations() throws IOException {
 final String[] result = inputSplit.getLocations();
 return result;
}

代码示例来源:origin: elastic/elasticsearch-hadoop

/** Exposes the delegate split's preferred host list unchanged. */
public String[] getLocations() throws IOException {
	String[] hosts = delegate.getLocations();
	return hosts;
}

代码示例来源:origin: apache/avro

/** Thin wrapper: locality comes straight from the wrapped split. */
public String[] getLocations() throws IOException {
 final String[] hosts = inputSplit.getLocations();
 return hosts;
}

代码示例来源:origin: apache/hive

/**
 * Returns the hosts of the wrapped mapred split. A failed lookup is
 * logged and reported as an empty array instead of being propagated.
 */
@Override
public String[] getLocations() {
 String[] hosts = new String[0]; // fallback when the lookup errors
 try {
  hosts = baseMapRedSplit.getLocations();
 } catch (IOException e) {
  LOG.warn("Exception in HCatSplit", e);
 }
 return hosts;
}

代码示例来源:origin: apache/hive

@Override
 /**
  * Normalizes split locality: a lone "localhost" entry carries no usable
  * placement information, so it is collapsed to an empty array.
  */
 public String[] getLocations(InputSplit split) throws IOException {
  if (split == null) {
   return null;
  }
  String[] locations = split.getLocations();
  boolean onlyLocalhost = locations != null
    && locations.length == 1
    && "localhost".equals(locations[0]);
  return onlyLocalhost ? ArrayUtils.EMPTY_STRING_ARRAY : locations;
 }
};

代码示例来源:origin: apache/drill

@Override
 /**
  * Drops a sole "localhost" location — it tells the scheduler nothing —
  * and otherwise passes the split's host list through untouched.
  */
 public String[] getLocations(InputSplit split) throws IOException {
  if (split == null) {
   return null;
  }
  String[] hosts = split.getLocations();
  if (hosts == null || hosts.length != 1) {
   return hosts;
  }
  return "localhost".equals(hosts[0]) ? ArrayUtils.EMPTY_STRING_ARRAY : hosts;
 }
};

代码示例来源:origin: apache/drill

/**
 * @return collection of unique locations where input splits are stored
 */
public Collection<String> getLocations() throws IOException {
 Set<String> unique = new HashSet<>();
 for (InputSplit split : inputSplits) {
  String[] hosts = split.getLocations();
  Collections.addAll(unique, hosts);
 }
 return unique;
}

代码示例来源:origin: apache/hive

@Override
 /** Routes to whichever underlying split this wrapper currently represents. */
 public String[] getLocations() throws IOException {
  if (isTableSplit) {
   return tableSplit.getLocations();
  }
  return snapshotSplit.getLocations();
 }
}

代码示例来源:origin: Alluxio/alluxio

/**
 * Returns a string representation of a {@link InputSplit}.
 *
 * <p>On failure the partially built description is still returned; the
 * error is logged rather than propagated.
 *
 * @param is Hadoop {@link InputSplit}
 * @return its string representation
 */
public static String toStringHadoopInputSplit(InputSplit is) {
 StringBuilder sb = new StringBuilder("HadoopInputSplit: ");
 try {
  sb.append(" Length: ").append(is.getLength());
  sb.append(" , Locations: ");
  for (String loc : is.getLocations()) {
   sb.append(loc).append(" ; ");
  }
 } catch (IOException e) {
  // Fix: log the throwable itself, not just its message — otherwise the
  // stack trace (and any null message) is silently lost.
  LOG.error(e.getMessage(), e);
 }
 return sb.toString();
}

代码示例来源:origin: apache/hive

/** Builds a Mockito stub whose getLocations() yields the supplied hosts. */
private InputSplit createMockInputSplit(String[] locations) throws IOException {
 InputSplit split = mock(InputSplit.class);
 doReturn(locations).when(split).getLocations();
 return split;
}

代码示例来源:origin: apache/incubator-druid

(final org.apache.hadoop.mapred.InputSplit split) -> {
 try {
  return Arrays.stream(split.getLocations());

代码示例来源:origin: apache/hive

/**
 * Maps a file split to its pre-computed location; non-file splits keep
 * whatever locality they already report.
 */
@Override
public String[] getLocations(InputSplit split) throws IOException {
 if (!(split instanceof FileSplit)) {
  if (LOG.isDebugEnabled()) {
   LOG.debug("Split: " + split + " is not a FileSplit. Using default locations");
  }
  return split.getLocations();
 }
 FileSplit fileSplit = (FileSplit) split;
 String splitDesc = "Split at " + fileSplit.getPath() + " with offset= " + fileSplit.getStart()
   + ", length=" + fileSplit.getLength();
 String location = locations.get(determineLocation(
   locations, fileSplit.getPath().toString(), fileSplit.getStart(), splitDesc));
 if (location == null) {
  return null;
 }
 return new String[] { location };
}

代码示例来源:origin: apache/hive

String rack = (split instanceof TezGroupedSplit) ? ((TezGroupedSplit) split).getRack() : null;
if (rack == null) {
 String [] locations = split.getLocations();
 if (locations != null && locations.length > 0) {
   locationHints.add(TaskLocationHint
     .createTaskLocationHint(new LinkedHashSet<String>(Arrays.asList(split
       .getLocations())), null));

代码示例来源:origin: apache/drill

/**
 * Resolves a file split's location from the precomputed map; any other
 * split type falls back to its own reported locality.
 */
@Override
public String[] getLocations(InputSplit split) throws IOException {
 boolean isFileSplit = split instanceof FileSplit;
 if (!isFileSplit) {
  if (isDebugEnabled) {
   LOG.debug("Split: " + split + " is not a FileSplit. Using default locations");
  }
  return split.getLocations();
 }
 FileSplit fs = (FileSplit) split;
 String splitDesc = "Split at " + fs.getPath() + " with offset= " + fs.getStart()
   + ", length=" + fs.getLength();
 String location = locations.get(determineLocation(
   locations, fs.getPath().toString(), fs.getStart(), splitDesc));
 return (location != null) ? new String[] { location } : null;
}

代码示例来源:origin: apache/ignite

/**
 * @param jobConf Job configuration.
 * @return Collection of mapped splits.
 * @throws IgniteCheckedException If mapping failed.
 */
public static Collection<HadoopInputSplit> splitJob(JobConf jobConf) throws IgniteCheckedException {
    try {
        InputFormat<?, ?> format = jobConf.getInputFormat();

        assert format != null;

        InputSplit[] nativeSplits = format.getSplits(jobConf, 0);
        Collection<HadoopInputSplit> mapped = new ArrayList<>(nativeSplits.length);

        for (int idx = 0; idx < nativeSplits.length; idx++) {
            InputSplit nativeSplit = nativeSplits[idx];

            if (nativeSplit instanceof FileSplit) {
                // File splits carry enough metadata to be mapped to a block directly.
                FileSplit fileSplit = (FileSplit)nativeSplit;

                mapped.add(new HadoopFileBlock(fileSplit.getLocations(), fileSplit.getPath().toUri(),
                    fileSplit.getStart(), fileSplit.getLength()));
            }
            else {
                // Anything else is wrapped generically, keyed by its position.
                mapped.add(HadoopUtils.wrapSplit(idx, nativeSplit, nativeSplit.getLocations()));
            }
        }

        return mapped;
    }
    catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}

代码示例来源:origin: apache/hbase

Assert.assertTrue(split.getLocations() != null);
} else {
 Assert.assertTrue(split.getLocations() != null && split.getLocations().length == 0);

相关文章

微信公众号

最新文章

更多