Usage of the water.Job.isRunning() method, with code examples


This article collects Java code examples for the water.Job.isRunning() method and shows how Job.isRunning() is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of Job.isRunning() follow:
Package path: water.Job
Class name: Job
Method name: isRunning

Job.isRunning overview

Returns true if this job is running.
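
For orientation, here is a minimal polling sketch in the style of the snippets below. It assumes an H2O-2 style runtime in which a water.Job has already been submitted and its water.Key is known; the class and helper names (JobPollingSketch, waitUntilJobEnds) are hypothetical and exist only to illustrate the static Job.isRunning(Key) overload shown in the second example.

import water.Job;
import water.Key;

public class JobPollingSketch {
 /** Hypothetical helper: block the calling thread until the given job stops running. */
 static void waitUntilJobEnds(Key jobKey) throws InterruptedException {
  // Job.isRunning(Key) stays true while the job is neither done, cancelled, nor crashed.
  while (Job.isRunning(jobKey)) {
   Thread.sleep(100); // back off briefly between polls, as several snippets below do
  }
  // At this point Job.isEnded(jobKey) would report true: the job finished, was cancelled, or crashed.
 }
}

Most of the examples below use the same idea in one of two ways: worker loops that keep iterating while Job.isRunning(...) returns true, and cancellation checks that throw Job.JobCancelledException as soon as it returns false.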

Code examples

Code example from: h2oai/h2o-2

/**
 * Returns true if job is not running.
 * The job can be cancelled, crashed, or already done.
 *
 * @param jobkey job identification key
 * @return true if job is done, cancelled, or crashed, else false
 */
public static boolean isEnded(Key jobkey) { return !isRunning(jobkey); }

Code example from: h2oai/h2o-2

/** Check if given job is running.
 *
 * @param job_key job key
 * @return true if job is still running else returns false.
 */
public static boolean isRunning(Key job_key) {
 Job j = UKV.get(job_key);
 assert j!=null : "Job should be always in DKV!";
 return j.isRunning();
}

Code example from: h2oai/h2o-2

@Override public void run() {
  while( _job == null || Job.isRunning(_job) ) {
   if( !home )
    _node.sync();
   else {
    _node._total = _node._trainer.processed();
    try {
     Thread.sleep(1);
    } catch( InterruptedException ex ) {
    }
   }
  }
 }
};

Code example from: h2oai/h2o-2

@Override public void run() {
 for( long i = 0; _stepsPerThread == 0 || i < _stepsPerThread; i++ ) {
  if( job != null && (!Job.isRunning(job) || !NeuralNet.running) )
   break;
  try {
//  long seed = thread_num * _stepsPerThread + input._pos; //BAD
   long seed = new Random().nextLong(); //GOOD
//  long seed = thread_num * _stepsPerThread + _processed.get(); //TRY
   trainer.step(seed);
   input.move();
   _processed.incrementAndGet();
  } catch (Exception e) {
   e.getStackTrace();
  }
 }
}
};

Code example from: h2oai/h2o-2

public void shrink() {
  if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();
//    for ( Col c: _c) c.shrink();
  // sort columns in parallel: c.shrink() calls single-threaded Arrays.sort()
  RecursiveAction [] ras = new RecursiveAction[_c.length];
  int i=0;
  for ( final Col c: _c) {
   ras[i++] = new RecursiveAction() {
    @Override public void compute() { c.shrink(); }
   };
  }
  ForkJoinTask.invokeAll(ras);
 }

Code example from: h2oai/h2o-2

public void run() {
 Training training = new Training() {
  @Override long processed() {
   return _processed;
  }
 };
 for (Layer _l : _ls) _l._training = training;
 Input input = (Input) _ls[0];
 for( ; _limit == 0 || _processed < _limit; _processed++ ) {
  step(_processed);
  input.move();
  if( _job != null && (!Job.isRunning(_job) || !NeuralNet.running ) )
   break;
 }
}

Code example from: h2oai/h2o-2

@Override
 public String elementToString(JsonElement elm, String contextName) {
  String html;
  if( !Job.isRunning(Key.make(elm.getAsString())) )
   html = "<button disabled class='btn btn-mini'>X</button>";
  else {
   String keyParam = KEY + "=" + elm.getAsString();
   html = "<a href='/Cancel.html?" + keyParam + "'><button class='btn btn-danger btn-mini'>X</button></a>";
  }
  return html;
 }
});

Code example from: h2oai/h2o-3

/**
 * Takes in an AutoML instance and starts running it. Progress can be tracked via its job().
 * @param aml the AutoML instance to start
 */
public static void startAutoML(AutoML aml) {
 // Currently AutoML can only run one job at a time
 if (aml.job == null || !aml.job.isRunning()) {
  H2OJob j = new H2OJob(aml, aml._key, aml.timeRemainingMs());
  aml.job = j._job;
  aml.planWork();
  j.start(aml.workAllocations.remainingWork());
  DKV.put(aml);
 }
}

Code example from: h2oai/h2o-2

@Override public void compute2() {
 if(Job.isRunning(_jobKey)) {
  Timer timer    = new Timer();
  _stats[0]      = new ThreadLocal<hex.singlenoderf.Statistic>();
  if(_jobKey != null && !Job.isRunning(_jobKey)) throw new Job.JobCancelledException();

Code example from: h2oai/h2o-2

@Override
public void reduce(GLMIterationTask git){
 if(_jobKey == null || Job.isRunning(_jobKey)) {
  Utils.add(_xy, git._xy);
  if (_computeGram) _gram.add(git._gram);
  _yy += git._yy;
  _nobs += git._nobs;
  if (_validate) _val.add(git._val);
  if (_computeGradient) Utils.add(_grad, git._grad);
  if(_validate && _glm.family == Family.binomial) {
   _newThresholds[0] = Utils.join(_newThresholds[0], git._newThresholds[0]);
   _newThresholds[1] = Utils.join(_newThresholds[1], git._newThresholds[1]);
   if (_newThresholds[0].length >= 2 * N_THRESHOLDS) {
    for (int i = 0; i < 2 * N_THRESHOLDS; i += 2)
     _newThresholds[0][i >> 1] = _newThresholds[0][i];
   }
   if (_newThresholds[0].length > N_THRESHOLDS)
    _newThresholds[0] = Arrays.copyOf(_newThresholds[0], N_THRESHOLDS);
   if (_newThresholds[1].length >= 2 * N_THRESHOLDS) {
    for (int i = 0; i < 2 * N_THRESHOLDS; i += 2)
     _newThresholds[1][i >> 1] = _newThresholds[1][i];
   }
   if (_newThresholds[1].length > N_THRESHOLDS)
    _newThresholds[1] = Arrays.copyOf(_newThresholds[1], N_THRESHOLDS);
  }
  super.reduce(git);
 }
}

Code example from: h2oai/h2o-2

@Override public final void processRow(long seed, final double [] nums, final int numcats, final int [] cats, double [] responses){
 if(_output.get_params().self() != null && !Job.isRunning(_output.get_params().self())) throw new Job.JobCancelledException();
 if (model_info().get_params().reproducible) {
  seed += model_info().get_processed_global(); //avoid periodicity
 } else {
  seed = new Random().nextLong();
 }
 ((Neurons.Input)_neurons[0]).setInput(seed, nums, numcats, cats);
 step(seed, _neurons, _output, _training, responses);
}

Code example from: h2oai/h2o-2

@Override public void compute2() {
  if( (_count < 0 || --_count >= 0) && (_node._job == null || Job.isRunning(_node._job)) ) {
   for( Chunk[] cs : _node._chunks ) {
    DescentChunk task = new DescentChunk();
    task._node = _node;
    task._cs = cs;
    H2O.submitTask(task);
   }
   reinitialize();
   H2O.submitTask(this);
  } else {
   if( _node._key.home() )
    _node._trainer.done();
  }
 }
}

Code example from: h2oai/h2o-2

if (!isRunning(self()) || !running) break;
} else {
 if (!running) break; //MapReduce calls cancel() early, we are waiting for running = false

Code example from: h2oai/h2o-2

DataAdapter(Frame fr, SpeeDRFModel model, int[] modelDataMap, int rows,
       long unique, long seed, int binLimit, double[] classWt) {
//    assert model._dataKey == fr._key;
  _seed       = seed+(unique<<16); // This is important to preserve sampling selection!!!
  /* Maximum arity for a column (not a hard limit) */
  _numRows    = rows;
  _jobKey     = model.jobKey;
  _numClasses = model.regression ? 1 : model.classes();
  _regression = model.regression;
  _c = new Col[fr.numCols()];
  for( int i = 0; i < _c.length; i++ ) {
   if(model.jobKey != null && !Job.isRunning(model.jobKey)) throw new Job.JobCancelledException();
   assert fr._names[modelDataMap[i]].equals(fr._names[i]);
   Vec v = fr.vecs()[i];
   if( isByteCol(v,rows, i == _c.length-1, _regression) ) // we do not bin for small values
    _c[i] = new Col(fr._names[i], rows, i == _c.length-1);
   else
    _c[i] = new Col(fr._names[i], rows, i == _c.length-1, binLimit, !(v.isEnum() || v.isInt()));
  }
  boolean trivial = true;
  if (classWt != null) for(double f: classWt) if (f != 1.0) trivial = false;
  _classWt = trivial ?  null : classWt;
 }

Code example from: h2oai/h2o-2

ktrees = buildNextKTrees(fr,_mtry,sample_rate,rand,tid);
 Log.info(logTag(), (tid+1) + ". tree was built " + kb_timer.toString());
 if( !Job.isRunning(self()) ) break; // If canceled during building, do not bulkscore
if( Job.isRunning(self()) ) { // do not perform final scoring and finish
 model = doScoring(model, fr, ktrees, tid, tstats, true, !hasValidation(), build_tree_one_node);

Code example from: h2oai/h2o-2

@Override public void compute2() {
 if( _node._job == null || (Job.isRunning(_node._job) && NeuralNet.running)) {
  Layer[] clones = new Layer[_node._ls.length];
  ChunksInput input = new ChunksInput(Utils.remove(_cs, _cs.length - 1), (VecsInput) _node._ls[0]);

Code example from: h2oai/h2o-2

@Override public void lcompute() {
  final H2O.KeyInfo[] kinfo = H2O.KeySnapshot.localSnapshot(true)._keyInfos;
  for(H2O.KeyInfo k:kinfo) {
   if(!k.isLockable()) continue;
   final Value val = DKV.get(k._key);
   if( val == null ) continue;
   final Object obj = val.rawPOJO();
   if( obj == null ) continue; //need to have a POJO to be locked
   final Lockable<?> lockable = (Lockable<?>)(obj);
   final Key[] lockers = ((Lockable) obj)._lockers;
   if (lockers != null) {
    // check that none of the locking jobs is still running
    for (Key locker : lockers) {
     if (locker != null && locker.type() == Key.JOB) {
      final Job job = UKV.get(locker);
      if (job != null && job.isRunning())
       throw new UnsupportedOperationException("Cannot unlock all keys since locking jobs are still running.");
     }
    }
    lockable.unlock_all();
    Log.info("Unlocked key '" + k._key + "' from " + lockers.length + " lockers.");
   }
  }
  Log.info("All keys are now unlocked.");
  tryComplete();
 }
}

Code example from: h2oai/h2o-2

ktrees = buildNextKTrees(fr);
 Log.info(Sys.GBM__, (tid+1) + ". tree was built in " + kb_timer.toString());
 if( !Job.isRunning(self()) ) break; // If canceled during building, do not bulkscore
if (Job.isRunning(self())) {
 model = doScoring(model, fr, ktrees, tid, tstats, true, false, false);

Code example from: h2oai/h2o-2

@Override protected void compute() {
 if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
 try {
  Chunk[] chks = new Chunk[fr.numCols()];
   if(dapt._jobKey != null && !Job.isRunning(dapt._jobKey)) throw new Job.JobCancelledException();
   int rowNum = (int)chks[0]._start + j;
   boolean rowIsValid = false;

Code example from: h2oai/h2o-3

void compute() {
 try {
  B builder = createBuilder();
  if (_hasMetalearnerParams) {
   builder._parms = _metalearner_parameters;
  }
  setCommonParams(builder._parms);
  setCrossValidationParams(builder._parms);
  setCustomParams(builder._parms);
  builder.init(false);
  Job<M> j = builder.trainModel();
  while (j.isRunning()) {
   try {
    _job.update(j._work, "training metalearner(" + _model._parms._metalearner_algorithm + ")");
    Thread.sleep(100);
   } catch (InterruptedException ignored) {
   }
  }
  Log.info("Finished training metalearner model(" + _model._parms._metalearner_algorithm + ").");
  _model._output._metalearner = builder.get();
  _model.doScoreOrCopyMetrics(_job);
  if (_parms._keep_levelone_frame) {
   _model._output._levelone_frame_id = _levelOneTrainingFrame; //Keep Level One Training Frame in Stacked Ensemble model object
  }
 } finally {
  cleanup();
  _model.update(_job);
  _model.unlock(_job);
 }
}
