Usage of the org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getLayerWiseConfigurations() method, with code examples


This article collects a number of Java code examples for org.deeplearning4j.nn.multilayer.MultiLayerNetwork.getLayerWiseConfigurations(), showing how the method is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they carry reasonable reference value. Details of the MultiLayerNetwork.getLayerWiseConfigurations() method are as follows:
Package: org.deeplearning4j.nn.multilayer
Class: MultiLayerNetwork
Method: getLayerWiseConfigurations

About MultiLayerNetwork.getLayerWiseConfigurations

Returns the MultiLayerConfiguration (the layer-wise configuration) that this MultiLayerNetwork was built from.
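
As a quick orientation before the collected snippets, here is a minimal, self-contained sketch of the most common use of getLayerWiseConfigurations(): fetching the MultiLayerConfiguration and round-tripping it through JSON. The network layout and dimensions are illustrative assumptions, not taken from any of the projects below.

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.lossfunctions.LossFunctions;

public class GetLayerWiseConfigurationsDemo {
    public static void main(String[] args) {
        // A trivial single-layer network; nIn/nOut are arbitrary for the demo
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .list()
                .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .activation(Activation.IDENTITY)
                        .nIn(4).nOut(2).build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();

        // getLayerWiseConfigurations() hands back the MultiLayerConfiguration
        // the network was constructed from; toJson()/fromJson() round-trips it
        MultiLayerConfiguration fetched = net.getLayerWiseConfigurations();
        MultiLayerConfiguration restored = MultiLayerConfiguration.fromJson(fetched.toJson());
        System.out.println("Round-trip equal: " + fetched.equals(restored));
    }
}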

Code examples

Code example source: deeplearning4j/dl4j-examples

public static void saveModel(FileSystem fs, Model model) throws Exception {

    String json = null;
    if (model instanceof MultiLayerNetwork) {
      json = ((MultiLayerNetwork) model).getLayerWiseConfigurations().toJson();
    } else if (model instanceof ComputationGraph) {
      json = ((ComputationGraph) model).getConfiguration().toJson();
    }
    byte[] bytes = json.getBytes();
    FSDataOutputStream out = fs.create(new Path(modelPath));
    out.write(bytes);
    out.hsync();
    out.close();   // close the stream before closing the file system
    fs.close();
}
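
A hedged usage sketch for the helper above; the Hadoop Configuration setup and the trainedNetwork variable are illustrative assumptions:

org.apache.hadoop.conf.Configuration hadoopConf = new org.apache.hadoop.conf.Configuration();
FileSystem fs = FileSystem.get(hadoopConf);
saveModel(fs, trainedNetwork);  // writes the configuration JSON to modelPath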

Code example source: de.datexis/texoo-core

public MultiLayerConfiguration getLayerConfiguration() {
    // instanceof is false for null, so a separate null check is unnecessary
    if (net instanceof MultiLayerNetwork) return ((MultiLayerNetwork) net).getLayerWiseConfigurations();
    return null;
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Prints the configuration
 */
public void printConfiguration() {
  StringBuilder sb = new StringBuilder();
  int count = 0;
  for (NeuralNetConfiguration conf : getLayerWiseConfigurations().getConfs()) {
    sb.append(" Layer " + count++ + " conf " + conf);
  }
  log.info(sb.toString());
}

Code example source: CampagneLaboratory/variationanalysis

protected static void save(MultiLayerNetwork net, String confOut, String paramOut, String updaterOut) throws IOException {
  String confJSON = net.getLayerWiseConfigurations().toJson();
  INDArray params = net.params();
  Updater updater = net.getUpdater();
  FileUtils.writeStringToFile(new File(confOut), confJSON, "UTF-8");
  try (DataOutputStream dos = new DataOutputStream(new BufferedOutputStream(Files.newOutputStream(Paths.get(paramOut))))) {
    Nd4j.write(params, dos);
  }
  try (ObjectOutputStream oos = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(new File(updaterOut))))) {
    oos.writeObject(updater);
  }
}
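
For context, a hedged sketch of the matching load step; the method name and file handling mirror the snippet above but are assumptions, not code from the same project:

protected static MultiLayerNetwork load(String confIn, String paramIn, String updaterIn)
        throws IOException, ClassNotFoundException {
    // Rebuild the configuration from the saved JSON
    String confJSON = FileUtils.readFileToString(new File(confIn), "UTF-8");
    MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(confJSON);
    // Read the flattened parameter vector
    INDArray params;
    try (DataInputStream dis = new DataInputStream(new BufferedInputStream(Files.newInputStream(Paths.get(paramIn))))) {
        params = Nd4j.read(dis);
    }
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init(params, false);  // false: use the loaded parameter array as-is
    // Restore the updater state so training can resume where it left off
    try (ObjectInputStream ois = new ObjectInputStream(new BufferedInputStream(new FileInputStream(new File(updaterIn))))) {
        net.setUpdater((Updater) ois.readObject());
    }
    return net;
}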

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Multilayer Network to tweak for transfer learning
 * @param origModel
 */
public Builder(MultiLayerNetwork origModel) {
  this.origModel = origModel;
  this.origConf = origModel.getLayerWiseConfigurations().clone();
  this.inputPreProcessors = origConf.getInputPreProcessors();
}
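
A hedged sketch of how this Builder is typically driven; the frozen-layer index, the FineTuneConfiguration settings, and the pretrainedNet variable are illustrative assumptions:

MultiLayerNetwork tweaked = new TransferLearning.Builder(pretrainedNet)
        .fineTuneConfiguration(new FineTuneConfiguration.Builder()
                .updater(new Nesterovs(0.001, 0.9))
                .build())
        .setFeatureExtractor(1)  // freeze layers 0..1 (index is an assumption)
        .build();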

Code example source: apache/opennlp-sandbox

/**
 * Zips the current state of the model and writes it to the given stream
 * @param stream the stream to write to
 * @throws IOException
 */
public void saveModel(OutputStream stream) throws IOException {
  try (ZipOutputStream zipOut = new ZipOutputStream(new BufferedOutputStream(stream))) {
    // Write out manifest
    zipOut.putNextEntry(new ZipEntry(MANIFEST));
    String comments = "Created-By:" + System.getenv("USER") + " at " + new Date().toString()
        + "\nModel-Version: " + VERSION
        + "\nModel-Schema:" + MODEL_NAME;
    manifest.store(zipOut, comments);
    zipOut.closeEntry();
    // Write out the network
    zipOut.putNextEntry(new ZipEntry(NETWORK));
    byte[] jModel = network.getLayerWiseConfigurations().toJson().getBytes();
    zipOut.write(jModel);
    zipOut.closeEntry();
    //Write out the network coefficients
    zipOut.putNextEntry(new ZipEntry(WEIGHTS));
    Nd4j.write(network.params(), new DataOutputStream(zipOut));
    zipOut.closeEntry();
    // Write out vectors
    zipOut.putNextEntry(new ZipEntry(GLOVES));
    gloves.writeOut(zipOut, false);
    zipOut.closeEntry();
    zipOut.finish();
  }
}
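
A hedged counterpart sketch for reading the network entry back out of the zip; the ZipFile usage, the modelFile variable, and the commons-io IOUtils call are assumptions rather than code from the same project:

try (ZipFile zipFile = new ZipFile(modelFile)) {
    ZipEntry entry = zipFile.getEntry(NETWORK);
    String json = IOUtils.toString(zipFile.getInputStream(entry), StandardCharsets.UTF_8);
    MultiLayerConfiguration conf = MultiLayerConfiguration.fromJson(json);
}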

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Calculate the activation from the previous layer, applying input pre-processing where necessary
 *
 * @param curr     the current layer
 * @param input    the input
 * @param training whether the network is in training mode
 * @return the activation from the previous layer
 */
public INDArray activationFromPrevLayer(int curr, INDArray input, boolean training) {
  if (getLayerWiseConfigurations().getInputPreProcess(curr) != null)
    input = getLayerWiseConfigurations().getInputPreProcess(curr).preProcess(input, getInputMiniBatchSize());
  INDArray ret = layers[curr].activate(input, training);
  return ret;
}
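
A hedged sketch of stepping a network forward one layer at a time with this method; the trained net and the features array (of matching input shape) are assumptions:

INDArray act = features;
for (int i = 0; i < net.getnLayers(); i++) {
    // each call applies layer i's pre-processor (if any), then its activation
    act = net.activationFromPrevLayer(i, act, false);  // false = inference mode
}
System.out.println("output shape: " + java.util.Arrays.toString(act.shape()));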

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Compute the input linear transformation (z) from the previous layer,
 * applying pre-processing where necessary
 *
 * @param curr     the current layer
 * @param input    the input
 * @param training training or test mode
 * @return the pre-activation (z) for the given layer
 */
public INDArray zFromPrevLayer(int curr, INDArray input, boolean training) {
  if (getLayerWiseConfigurations().getInputPreProcess(curr) != null)
    input = getLayerWiseConfigurations().getInputPreProcess(curr).preProcess(input, input.size(0));
  INDArray ret = layers[curr].preOutput(input, training);
  return ret;
}

Code example source: org.deeplearning4j/arbiter-deeplearning4j

earlyStoppingConfiguration, nEpochs);
} else {
  dl4jConfiguration = new DL4JConfiguration(((MultiLayerNetwork) m).getLayerWiseConfigurations(),
          earlyStoppingConfiguration, nEpochs);

Code example source: org.deeplearning4j/deeplearning4j-nn

@Override
public INDArray preOutput(INDArray x) {
  INDArray lastLayerActivation = x;
  for (int i = 0; i < layers.length - 1; i++) {
    if (getLayerWiseConfigurations().getInputPreProcess(i) != null)
      lastLayerActivation = getLayerWiseConfigurations().getInputPreProcess(i).preProcess(lastLayerActivation,
              getInputMiniBatchSize());
    lastLayerActivation = layers[i].activate(lastLayerActivation);
  }
  if (getLayerWiseConfigurations().getInputPreProcess(layers.length - 1) != null)
    lastLayerActivation = getLayerWiseConfigurations().getInputPreProcess(layers.length - 1)
            .preProcess(lastLayerActivation, getInputMiniBatchSize());
  return layers[layers.length - 1].preOutput(lastLayerActivation);
}

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Compute activations from input to output of the output layer
 *
 * @param input the input to feed forward
 * @return the list of activations for each layer
 */
public List<INDArray> feedForward(INDArray input) {
  if (input == null)
    throw new IllegalStateException("Unable to perform feed forward; no input found");
  else if (this.getLayerWiseConfigurations().getInputPreProcess(0) != null)
    setInput(getLayerWiseConfigurations().getInputPreProcess(0).preProcess(input, input.size(0)));
  else
    setInput(input);
  return feedForward();
}
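
A hedged usage sketch for feedForward; the input shape is an assumption, and element 0 of the returned list is the input itself:

INDArray features = Nd4j.rand(5, 4);  // assumed mini-batch: 5 examples, 4 features
List<INDArray> activations = net.feedForward(features);
for (int i = 0; i < activations.size(); i++) {
    System.out.println("activations[" + i + "] shape: "
            + java.util.Arrays.toString(activations.get(i).shape()));
}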

Code example source: org.deeplearning4j/deeplearning4j-nn

if (getLayerWiseConfigurations().getInputPreProcess(i) != null)
  currInput = getLayerWiseConfigurations().getInputPreProcess(i).preProcess(currInput, input.size(0));
if (layers[i] instanceof RecurrentLayer) {
  currInput = ((RecurrentLayer) layers[i]).rnnActivateUsingStoredState(currInput, training,

Code example source: org.deeplearning4j/deeplearning4j-nn

/**
 * Compute the pre-activations (z values) from input to output of the output layer
 *
 * @param input    the input
 * @param training training or test mode
 * @return the list of pre-activations for each layer
 */
public List<INDArray> computeZ(INDArray input, boolean training) {
  if (input == null)
    throw new IllegalStateException("Unable to perform feed forward; no input found");
  else if (this.getLayerWiseConfigurations().getInputPreProcess(0) != null)
    setInput(getLayerWiseConfigurations().getInputPreProcess(0).preProcess(input, getInputMiniBatchSize()));
  else
    setInput(input);
  return computeZ(training);
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public static int getIterationCount(Model model) {
  if (model instanceof MultiLayerNetwork) {
    return ((MultiLayerNetwork) model).getLayerWiseConfigurations().getIterationCount();
  } else if (model instanceof ComputationGraph) {
    return ((ComputationGraph) model).getConfiguration().getIterationCount();
  } else {
    return model.conf().getIterationCount();
  }
}

Code example source: org.deeplearning4j/deeplearning4j-nn

public static void incrementIterationCount(Model model, int incrementBy) {
  if (model instanceof MultiLayerNetwork) {
    MultiLayerConfiguration conf = ((MultiLayerNetwork) model).getLayerWiseConfigurations();
    conf.setIterationCount(conf.getIterationCount() + incrementBy);
  } else if (model instanceof ComputationGraph) {
    ComputationGraphConfiguration conf = ((ComputationGraph) model).getConfiguration();
    conf.setIterationCount(conf.getIterationCount() + incrementBy);
  } else {
    model.conf().setIterationCount(model.conf().getIterationCount() + incrementBy);
  }
}
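
Both helpers go through the configuration object, so the counter can also be read directly; a minimal hedged sketch, assuming a trained net:

MultiLayerConfiguration conf = net.getLayerWiseConfigurations();
System.out.println("iterations completed so far: " + conf.getIterationCount());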

Code example source: sjsdfg/dl4j-tutorials

public static void main(String[] args) throws Exception {
  //Define a simple MultiLayerNetwork:
  MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
    .weightInit(WeightInit.XAVIER)
      .updater(new Nesterovs(0.01, 0.9))
    .list()
    .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
    .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD).activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
    .backprop(true).pretrain(false).build();
  MultiLayerNetwork net = new MultiLayerNetwork(conf);
  net.init();
  //Save the model
  File locationToSave = new File("model/MyMultiLayerNetwork.zip");      //Where to save the network. Note: the file is in .zip format - can be opened externally
    /*
     * Controls whether the updater state is saved along with the model.
     * If you intend to keep training the model after saving, pass true so that
     * training can resume incrementally on new data.
     * If no further training is planned, false is sufficient once the model is finalized.
     */
  boolean saveUpdater = true;                                             //Updater: i.e., the state for Momentum, RMSProp, Adagrad etc. Save this if you want to train your network more in the future
  ModelSerializer.writeModel(net, locationToSave, saveUpdater);
  //Load the model
  MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(locationToSave);
  System.out.println("Saved and loaded parameters are equal:      " + net.params().equals(restored.params()));
  System.out.println("Saved and loaded configurations are equal:  " + net.getLayerWiseConfigurations().equals(restored.getLayerWiseConfigurations()));
}

Code example source: org.deeplearning4j/deeplearning4j-nn

IOutputLayer ol = (IOutputLayer) getOutputLayer();
INDArray olInput = activations.get(n - 1);
if (getLayerWiseConfigurations().getInputPreProcess(n - 1) != null) {
  olInput = getLayerWiseConfigurations().getInputPreProcess(n - 1).preProcess(olInput, input.size(0));

Code example source: org.deeplearning4j/deeplearning4j-nn

InputPreProcessor preProcessor = getLayerWiseConfigurations().getInputPreProcess(i);

Code example source: rahul-raj/Deeplearning4J

System.out.println(restored.params()+" \n"+restored.getLayerWiseConfigurations());
INDArray output = customerLossPrediction.generateOutput(new File("test.csv"));

Code example source: org.deeplearning4j/deeplearning4j-nn

private void initHelperMLN() {
  if (applyFrozen) {
    org.deeplearning4j.nn.api.Layer[] layers = origMLN.getLayers();
    for (int i = frozenTill; i >= 0; i--) {
      //unchecked?
      layers[i] = new FrozenLayer(layers[i]);
    }
    origMLN.setLayers(layers);
  }
  for (int i = 0; i < origMLN.getnLayers(); i++) {
    if (origMLN.getLayer(i) instanceof FrozenLayer) {
      frozenInputLayer = i;
    }
  }
  List<NeuralNetConfiguration> allConfs = new ArrayList<>();
  for (int i = frozenInputLayer + 1; i < origMLN.getnLayers(); i++) {
    allConfs.add(origMLN.getLayer(i).conf());
  }
  MultiLayerConfiguration c = origMLN.getLayerWiseConfigurations();
  unFrozenSubsetMLN = new MultiLayerNetwork(new MultiLayerConfiguration.Builder().backprop(c.isBackprop())
          .inputPreProcessors(c.getInputPreProcessors()).pretrain(c.isPretrain())
          .backpropType(c.getBackpropType()).tBPTTForwardLength(c.getTbpttFwdLength())
          .tBPTTBackwardLength(c.getTbpttBackLength()).confs(allConfs).build());
  unFrozenSubsetMLN.init();
  //copy over params
  for (int i = frozenInputLayer + 1; i < origMLN.getnLayers(); i++) {
    unFrozenSubsetMLN.getLayer(i - frozenInputLayer - 1).setParams(origMLN.getLayer(i).params());
  }
  //unFrozenSubsetMLN.setListeners(origMLN.getListeners());
}
