Usage of org.apache.hyracks.api.context.IHyracksJobletContext.getJobId() with code examples

x33g5p2x · reposted on 2022-01-21 · filed under: Other

This article collects Java code examples of the org.apache.hyracks.api.context.IHyracksJobletContext.getJobId() method and shows how it is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of IHyracksJobletContext.getJobId() are as follows:
Package: org.apache.hyracks.api.context
Class: IHyracksJobletContext
Method: getJobId

About IHyracksJobletContext.getJobId

getJobId() returns the JobId of the Hyracks job that the joblet (and therefore the current task) belongs to. As the examples below show, it is typically paired with a TaskId to key per-task state objects, and its getCcId() is used to address the cluster controller that owns the job.
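
Before the extracted snippets, here is a minimal self-contained sketch of the recurring pattern they share. It is not taken from any of the projects above; the class name GetJobIdSketch and the helper describeTask are hypothetical, and only Hyracks API calls that also appear in the snippets below are assumed.

import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.hyracks.api.dataflow.ActivityId;
import org.apache.hyracks.api.dataflow.TaskId;
import org.apache.hyracks.api.job.JobId;

// Hypothetical helper: shows how the JobId from the joblet context is combined
// with a TaskId, the same pair used to construct the state objects in the
// examples below (MaterializerTaskState, SortTaskState, ...).
public final class GetJobIdSketch {

    private GetJobIdSketch() {
    }

    static String describeTask(IHyracksTaskContext ctx, ActivityId activityId) {
        // The joblet context is shared by all tasks of a job on this node;
        // getJobId() identifies that job.
        JobId jobId = ctx.getJobletContext().getJobId();
        // The partition index comes from the task attempt, as in the CommitRuntime example.
        int partition = ctx.getTaskAttemptId().getTaskId().getPartition();
        TaskId taskId = new TaskId(activityId, partition);
        // jobId.getCcId() identifies the cluster controller that owns the job,
        // which is how the sendApplicationMessageToCC examples route their messages.
        return "job=" + jobId + ", task=" + taskId + ", cc=" + jobId.getCcId();
    }
}

In the real operators shown below, this (jobId, taskId) pair is passed to a state object constructor and registered with ctx.setStateObject(...) so that a downstream activity of the same job can retrieve it.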

Code examples

Code example source: apache/asterixdb

@Override
public JobId getJobId() {
  return ctx.getJobletContext().getJobId();
}

Code example source: apache/asterixdb

@Override
public void sendApplicationMessageToCC(byte[] message, DeploymentId deploymentId) throws Exception {
  this.ncs.sendApplicationMessageToCC(getJobletContext().getJobId().getCcId(), message, deploymentId);
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new CollectTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  state.buffer = new ArrayList<Object[]>();
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  manager.registerPartition(pid, ctx.getJobletContext().getJobId().getCcId(), taId, this, PartitionState.STARTED,
      false);
  pendingConnection = true;
  ensureConnected();
}

Code example source: apache/asterixdb

@Override
public void sendApplicationMessageToCC(Serializable message, DeploymentId deploymentId) throws Exception {
  this.ncs.sendApplicationMessageToCC(getJobletContext().getJobId().getCcId(),
      JavaSerializationUtils.serialize(message), deploymentId);
}

Code example source: apache/asterixdb

@Override
public void close() throws HyracksDataException {
  closeTime = System.currentTimeMillis();
  try {
    ((Task) ctx).setPartitionSendProfile(
        new PartitionProfile(new PartitionId(ctx.getJobletContext().getJobId(), cd.getConnectorId(),
            senderIndex, receiverIndex), openTime, closeTime, mrep));
  } finally {
    writer.close();
  }
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  if (LOGGER.isEnabled(openCloseLevel)) {
    LOGGER.log(openCloseLevel, "open(" + pid + " by " + taId);
  }
  size = 0;
  eos = false;
  failed = false;
  deallocated = false;
  manager.registerPartition(pid, ctx.getJobletContext().getJobId().getCcId(), taId, this, PartitionState.STARTED,
      false);
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new MaterializerTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  state.open(ctx);
}

Code example source: apache/asterixdb

@Override
public void close() throws HyracksDataException {
  SortTaskState state = new SortTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  runGen.close();
  state.generatedRunFileReaders = runGen.getRuns();
  state.sorter = runGen.getSorter();
  if (LOGGER.isTraceEnabled()) {
    LOGGER.trace("InitialNumberOfRuns:" + runGen.getRuns().size());
  }
  ctx.setStateObject(state);
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new MaterializerTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  state.open(ctx);
}

Code example source: apache/asterixdb

@Override
  public void close() throws HyracksDataException {
    // expecting a range map
    if (numFields <= 0 || splitValues == null || splitValuesEndOffsets == null) {
      throw HyracksDataException.create(ErrorCode.NO_RANGEMAP_PRODUCED, sourceLoc);
    }
    // store the range map in the state object of ctx so that next activity (forward) could retrieve it
    TaskId rangeMapReaderTaskId = new TaskId(activityId, partition);
    RangeMapState rangeMapState = new RangeMapState(ctx.getJobletContext().getJobId(), rangeMapReaderTaskId);
    rangeMapState.rangeMap = new RangeMap(numFields, splitValues, splitValuesEndOffsets);
    ctx.setStateObject(rangeMapState);
  }
}

Code example source: apache/asterixdb

@Override
public final void deinitialize() throws HyracksDataException {
  activeManager.deregisterRuntime(runtimeId);
  try {
    ctx.sendApplicationMessageToCC(new ActivePartitionMessage(runtimeId, ctx.getJobletContext().getJobId(),
        Event.RUNTIME_DEREGISTERED, null), null);
  } catch (Exception e) {
    LOGGER.log(Level.INFO, "deinitialize() failed on ActiveSourceOperatorNodePushable", e);
    throw HyracksDataException.create(e);
  } finally {
    LOGGER.log(Level.INFO, "deinitialize() returning on ActiveSourceOperatorNodePushable");
  }
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  if (requiresMaterialization) {
    state = new MaterializerTaskState(ctx.getJobletContext().getJobId(),
        new TaskId(getActivityId(), partition), numberOfMaterializedOutputs);
    state.open(ctx);
  }
  for (int i = 0; i < numberOfNonMaterializedOutputs; i++) {
    isOpen[i] = true;
    writers[i].open();
  }
}

Code example source: apache/asterixdb

@Override
public void run() {
  TaskProfile taskProfile =
      new TaskProfile(task.getTaskAttemptId(), task.getPartitionSendProfile(), task.getStatsCollector());
  try {
    ncs.getClusterController(task.getJobletContext().getJobId().getCcId()).notifyTaskComplete(
        task.getJobletContext().getJobId(), task.getTaskAttemptId(), ncs.getId(), taskProfile);
  } catch (Exception e) {
    LOGGER.log(Level.ERROR, "Failed notifying task complete for " + task.getTaskAttemptId(), e);
  }
  task.getJoblet().removeTask(task);
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new ExternalGroupState(ctx.getJobletContext().getJobId(), stateId);
  ISpillableTable table = spillableTableFactory.buildSpillableTable(ctx, tableSize, fileSize, keyFields,
      comparators, firstNormalizerComputer, aggregatorFactory, inRecordDescriptor, outRecordDescriptor,
      framesLimit, 0);
  RunFileWriter[] runFileWriters = new RunFileWriter[table.getNumPartitions()];
  this.externalGroupBy = new ExternalHashGroupBy(this, table, runFileWriters, inRecordDescriptor);
  state.setSpillableTable(table);
  state.setRuns(runFileWriters);
  state.setSpilledNumTuples(externalGroupBy.getSpilledNumTuples());
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new JoinCacheTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  state.joiner = new NestedLoopJoin(ctx, new FrameTupleAccessor(rd0), new FrameTupleAccessor(rd1),
      comparator, memSize, predEvaluator, isLeftOuter, nullWriters1);
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  super.open();
  deletedTupleCounter = new DeletedTupleCounter(ctx.getJobletContext().getJobId(), partition);
  ctx.setStateObject(deletedTupleCounter);
  try {
    tb = new ArrayTupleBuilder(recordDesc.getFieldCount());
    dos = tb.getDataOutput();
    appender = new FrameTupleAppender(new VSizeFrame(ctx), true);
  } catch (Exception e) {
    throw HyracksDataException.create(e);
  }
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  state = new SortTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  IFrameBufferManager frameBufferManager = new VariableFrameMemoryManager(
      new VariableFramePool(ctx, VariableFramePool.UNLIMITED_MEMORY),
      FrameFreeSlotPolicyFactory.createFreeSlotPolicy(EnumFreeSlotPolicy.LAST_FIT));
  state.frameSorter =
      new FrameSorterMergeSort(ctx, frameBufferManager, VariableFramePool.UNLIMITED_MEMORY,
          sortFields, keyNormalizerFactories, comparatorFactories, outRecDescs[0]);
  state.frameSorter.reset();
}

Code example source: apache/asterixdb

@Override
public void open() throws HyracksDataException {
  ITuplePartitionComputer hpc0 =
      new FieldHashPartitionComputerFactory(keys0, hashFunctionFactories).createPartitioner(ctx);
  ITuplePartitionComputer hpc1 =
      new FieldHashPartitionComputerFactory(keys1, hashFunctionFactories).createPartitioner(ctx);
  state = new HashBuildTaskState(ctx.getJobletContext().getJobId(),
      new TaskId(getActivityId(), partition));
  ISerializableTable table = new SerializableHashTable(tableSize, ctx, bufferManager);
  state.joiner =
      new InMemoryHashJoin(ctx, new FrameTupleAccessor(rd0), hpc0, new FrameTupleAccessor(rd1),
          rd1, hpc1, new FrameTuplePairComparator(keys0, keys1, comparators), isLeftOuter,
          nullWriters1, table, predEvaluator, bufferManager);
}

Code example source: apache/asterixdb

@Override
  public IPushRuntime[] createPushRuntime(IHyracksTaskContext ctx) throws HyracksDataException {
    return new IPushRuntime[] { new CommitRuntime(ctx, new TxnId(ctx.getJobletContext().getJobId().getId()),
        getDatasetId(), primaryKeyFieldPermutation, true,
        ctx.getTaskAttemptId().getTaskId().getPartition(), true) };
  }
};
