Usage of the org.apache.spark.executor.TaskMetrics.memoryBytesSpilled() method, with code examples


This article collects Java code examples that use the org.apache.spark.executor.TaskMetrics.memoryBytesSpilled() method and shows how it is typically called. The snippets are drawn from selected open-source projects published on GitHub, Stack Overflow, and Maven, so they serve as a practical reference. Details of the method are as follows:
Package: org.apache.spark.executor
Class: TaskMetrics
Method: memoryBytesSpilled

TaskMetrics.memoryBytesSpilled overview

memoryBytesSpilled() returns the number of bytes spilled by the task, measured as the in-memory size of the data at the time it was spilled. The on-disk size of the same spilled data is reported by diskBytesSpilled(), which is why the two metrics usually appear together in the examples below.
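
For orientation, here is a minimal, self-contained sketch of how the metric is typically consumed: a SparkListener that logs each finished task's spill counters. The class name SpillLoggingListener is illustrative, not part of any library; the listener pattern itself mirrors the Criteo Garmadon snippets near the end of this article.

import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerTaskEnd;

// Illustrative listener (register it via SparkContext#addSparkListener):
// logs how much each finished task spilled, as in-memory and on-disk sizes.
public class SpillLoggingListener extends SparkListener {
 @Override
 public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
  // taskMetrics() can be null for tasks that did not report metrics (e.g. some failures).
  if (taskEnd.taskMetrics() == null) {
   return;
  }
  long memoryBytesSpilled = taskEnd.taskMetrics().memoryBytesSpilled(); // in-memory size of spilled data
  long diskBytesSpilled = taskEnd.taskMetrics().diskBytesSpilled();     // on-disk size of spilled data
  System.out.printf("stage %d, task %d: memoryBytesSpilled=%d, diskBytesSpilled=%d%n",
    taskEnd.stageId(), taskEnd.taskInfo().taskId(), memoryBytesSpilled, diskBytesSpilled);
 }
}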

Code examples

Code example source: apache/hive

public Metrics(TaskMetrics metrics, TaskInfo taskInfo) {
 this(
  metrics.executorDeserializeTime(),
  TimeUnit.NANOSECONDS.toMillis(metrics.executorDeserializeCpuTime()),
  metrics.executorRunTime(),
  TimeUnit.NANOSECONDS.toMillis(metrics.executorCpuTime()),
  metrics.resultSize(),
  metrics.jvmGCTime(),
  metrics.resultSerializationTime(),
  metrics.memoryBytesSpilled(),
  metrics.diskBytesSpilled(),
  taskInfo.duration(),
  optionalInputMetric(metrics),
  optionalShuffleReadMetric(metrics),
  optionalShuffleWriteMetrics(metrics),
  optionalOutputMetrics(metrics));
}

Code example source: org.apache.spark/spark-core_2.10 (the same test appears unchanged in spark-core_2.11 and spark-core)

private void writeEnoughRecordsToTriggerSortBufferExpansionAndSpill() throws Exception {
 memoryManager.limit(UnsafeShuffleWriter.DEFAULT_INITIAL_SORT_BUFFER_SIZE * 16);
 final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
 final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
 for (int i = 0; i < UnsafeShuffleWriter.DEFAULT_INITIAL_SORT_BUFFER_SIZE + 1; i++) {
  dataToWrite.add(new Tuple2<>(i, i));
 }
 writer.write(dataToWrite.iterator());
 writer.stop(true);
 readRecordsFromFile();
 assertSpillFilesWereCleanedUp();
 ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
 assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
 assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
 assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
 assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
 assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
}

Code example source: org.apache.spark/spark-core_2.11 (the same test appears unchanged in spark-core_2.10 and spark-core)

@Test
public void writeEnoughDataToTriggerSpill() throws Exception {
 memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
 final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
 final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
 final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
 for (int i = 0; i < 10 + 1; i++) {
  dataToWrite.add(new Tuple2<>(i, bigByteArray));
 }
 writer.write(dataToWrite.iterator());
 assertEquals(2, spillFilesCreated.size());
 writer.stop(true);
 readRecordsFromFile();
 assertSpillFilesWereCleanedUp();
 ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
 assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
 assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
 assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
 assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
 assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
}

Code example source: org.apache.spark/spark-core_2.11 (truncated excerpt; the same lines appear in spark-core_2.10 and spark-core)

assertThat(taskMetrics.diskBytesSpilled(), greaterThan(0L));
assertThat(taskMetrics.diskBytesSpilled(), lessThan(mergedOutputFile.length()));
assertThat(taskMetrics.memoryBytesSpilled(), greaterThan(0L));
assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());

Code example source: org.apache.spark/spark-core (the same test appears unchanged in spark-core_2.10 and spark-core_2.11)

@Test
public void writeWithoutSpilling() throws Exception {
 // In this example, each partition should have exactly one record:
 final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
 for (int i = 0; i < NUM_PARTITITONS; i++) {
  dataToWrite.add(new Tuple2<>(i, i));
 }
 final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
 writer.write(dataToWrite.iterator());
 final Option<MapStatus> mapStatus = writer.stop(true);
 assertTrue(mapStatus.isDefined());
 assertTrue(mergedOutputFile.exists());
 long sumOfPartitionSizes = 0;
 for (long size: partitionSizesInMergedFile) {
  // All partitions should be the same size:
  assertEquals(partitionSizesInMergedFile[0], size);
  sumOfPartitionSizes += size;
 }
 assertEquals(mergedOutputFile.length(), sumOfPartitionSizes);
 assertEquals(
  HashMultiset.create(dataToWrite),
  HashMultiset.create(readRecordsFromFile()));
 assertSpillFilesWereCleanedUp();
 ShuffleWriteMetrics shuffleWriteMetrics = taskMetrics.shuffleWriteMetrics();
 assertEquals(dataToWrite.size(), shuffleWriteMetrics.recordsWritten());
 assertEquals(0, taskMetrics.diskBytesSpilled());
 assertEquals(0, taskMetrics.memoryBytesSpilled());
 assertEquals(mergedOutputFile.length(), shuffleWriteMetrics.bytesWritten());
}

Code example source: org.apache.spark/spark-core_2.11 (the same test appears unchanged in spark-core_2.10 and spark-core)

@Test
public void writeEmptyIterator() throws Exception {
 final UnsafeShuffleWriter<Object, Object> writer = createWriter(true);
 writer.write(Iterators.emptyIterator());
 final Option<MapStatus> mapStatus = writer.stop(true);
 assertTrue(mapStatus.isDefined());
 assertTrue(mergedOutputFile.exists());
 assertArrayEquals(new long[NUM_PARTITITONS], partitionSizesInMergedFile);
 assertEquals(0, taskMetrics.shuffleWriteMetrics().recordsWritten());
 assertEquals(0, taskMetrics.shuffleWriteMetrics().bytesWritten());
 assertEquals(0, taskMetrics.diskBytesSpilled());
 assertEquals(0, taskMetrics.memoryBytesSpilled());
}

Code example source: com.github.hyukjinkwon/spark-client (the same constructor appears unchanged in org.spark-project.hive/spark-client)

public Metrics(TaskMetrics metrics) {
 this(
  metrics.executorDeserializeTime(),
  metrics.executorRunTime(),
  metrics.resultSize(),
  metrics.jvmGCTime(),
  metrics.resultSerializationTime(),
  metrics.memoryBytesSpilled(),
  metrics.diskBytesSpilled(),
  optionalInputMetric(metrics),
  optionalShuffleReadMetric(metrics),
  optionalShuffleWriteMetrics(metrics));
}

Code example source: com.criteo.java/garmadon-frameworks-spark

tryToSet(() -> taskEventBuilder.setPeakExecutionMemory(taskEnd.taskMetrics().peakExecutionMemory()));
tryToSet(() -> taskEventBuilder.setDiskBytesSpilled(taskEnd.taskMetrics().diskBytesSpilled()));
tryToSet(() -> taskEventBuilder.setMemoryBytesSpilled(taskEnd.taskMetrics().memoryBytesSpilled()));
tryToSet(() -> taskEventBuilder.setShuffleReadRecords(taskEnd.taskMetrics().shuffleReadMetrics().recordsRead()));
tryToSet(() -> taskEventBuilder.setShuffleReadFetchWaitTime(taskEnd.taskMetrics().shuffleReadMetrics().fetchWaitTime()));

Code example source: com.criteo.java/garmadon-frameworks-spark

tryToSet(() -> stageEventBuilder.setPeakExecutionMemory(stageCompleted.stageInfo().taskMetrics().peakExecutionMemory()));
tryToSet(() -> stageEventBuilder.setDiskBytesSpilled(stageCompleted.stageInfo().taskMetrics().diskBytesSpilled()));
tryToSet(() -> stageEventBuilder.setMemoryBytesSpilled(stageCompleted.stageInfo().taskMetrics().memoryBytesSpilled()));
tryToSet(() -> stageEventBuilder.setShuffleReadRecords(stageCompleted.stageInfo().taskMetrics().shuffleReadMetrics().recordsRead()));
tryToSet(() -> stageEventBuilder.setShuffleReadFetchWaitTime(stageCompleted.stageInfo().taskMetrics().shuffleReadMetrics().fetchWaitTime()));
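
The two Garmadon fragments above run inside SparkListener callbacks. As a rough, self-contained sketch of the stage-level variant (the class name StageSpillListener is illustrative), the same calls can be wrapped as follows:

import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerStageCompleted;

// Illustrative stage-level listener: stageInfo().taskMetrics() exposes the metrics
// accumulated over the stage's tasks, including memoryBytesSpilled().
public class StageSpillListener extends SparkListener {
 @Override
 public void onStageCompleted(SparkListenerStageCompleted stageCompleted) {
  long memoryBytesSpilled = stageCompleted.stageInfo().taskMetrics().memoryBytesSpilled();
  long diskBytesSpilled = stageCompleted.stageInfo().taskMetrics().diskBytesSpilled();
  System.out.printf("stage %d: memoryBytesSpilled=%d, diskBytesSpilled=%d%n",
    stageCompleted.stageInfo().stageId(), memoryBytesSpilled, diskBytesSpilled);
 }
}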
