gov.sandia.cognition.math.matrix.Vector.minusEquals()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(7.9k)|赞(0)|评价(0)|浏览(63)

本文整理了Java中gov.sandia.cognition.math.matrix.Vector.minusEquals()方法的一些代码示例,展示了Vector.minusEquals()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Vector.minusEquals()方法的具体详情如下:
包路径:gov.sandia.cognition.math.matrix.Vector
类名称:Vector
方法名:minusEquals

Vector.minusEquals介绍

暂无

代码示例

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-learning-core

@Override
public boolean removeClusterMember(
  final CentroidCluster<Vector> cluster,
  final Vector member)
{
  // Guard clause: nothing to do if the member was not actually in the cluster.
  if (!cluster.getMembers().remove(member))
  {
    return false;
  }

  final int remaining = cluster.getMembers().size();
  final Vector centroid = cluster.getCentroid();
  if (remaining <= 0)
  {
    // Cluster is now empty: reset the centroid to the zero vector.
    centroid.zero();
  }
  else
  {
    // Incrementally update the mean: centroid -= (member - centroid) / remaining.
    final Vector shift = member.minus(centroid);
    shift.scaleEquals(1.0 / remaining);
    centroid.minusEquals(shift);
  }
  return true;
}

代码示例来源:origin: algorithmfoundry/Foundry

@Override
public boolean removeClusterMember(
  final CentroidCluster<Vector> cluster,
  final Vector member)
{
  final boolean wasMember = cluster.getMembers().remove(member);
  if (wasMember)
  {
    final int membersLeft = cluster.getMembers().size();
    final Vector centroid = cluster.getCentroid();
    if (membersLeft > 0)
    {
      // Remove the member's contribution from the running mean in place:
      // centroid -= (member - centroid) / membersLeft.
      final Vector correction = member.minus(centroid);
      correction.scaleEquals(1.0 / membersLeft);
      centroid.minusEquals(correction);
    }
    else
    {
      // No members remain, so the centroid collapses to zero.
      centroid.zero();
    }
  }
  return wasMember;
}

代码示例来源:origin: algorithmfoundry/Foundry

@Override
public boolean removeClusterMember(
  final CentroidCluster<Vector> cluster,
  final Vector member)
{
  // Attempt the removal first; the centroid is only adjusted on success.
  if (!cluster.getMembers().remove(member))
  {
    return false;
  }

  final Vector centroid = cluster.getCentroid();
  final int count = cluster.getMembers().size();
  if (count <= 0)
  {
    centroid.zero();  // empty cluster: centroid becomes the zero vector
  }
  else
  {
    // Downdate the mean without recomputing it from scratch:
    // new centroid = centroid - (member - centroid) / count.
    final Vector adjustment = member.minus(centroid);
    adjustment.scaleEquals(1.0 / count);
    centroid.minusEquals(adjustment);
  }
  return true;
}

代码示例来源:origin: algorithmfoundry/Foundry

// Center the data set in place: subtract the stored mean from every vector.
for( Vector x : this.getData() )
  x.minusEquals( this.mean );

代码示例来源:origin: algorithmfoundry/Foundry

// In-place mean removal: each data vector has this.mean subtracted from it.
for( Vector x : this.getData() )
  x.minusEquals( this.mean );

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-learning-core

// Subtract the mean from every vector returned by getData(), mutating them in place.
for( Vector x : this.getData() )
  x.minusEquals( this.mean );

代码示例来源:origin: algorithmfoundry/Foundry

// In-place subtraction: remove these counts from the running global totals.
this.termGlobalFrequencies.minusEquals(counts);

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-text-core

// Deduct the given counts vector from the aggregate term frequencies in place.
this.termGlobalFrequencies.minusEquals(counts);

代码示例来源:origin: algorithmfoundry/Foundry

// Subtract counts from the global term-frequency accumulator (mutates it).
this.termGlobalFrequencies.minusEquals(counts);

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-learning-core

/**
 * Computes the unnormalized parameter-gradient sum and the total weight
 * over the cost parameters, packaged as a GradientPartialSSE.
 */
public Object computeParameterGradientPartial(
  GradientDescendable function )
{
  final RingAccumulator<Vector> gradientSum =
    new RingAccumulator<Vector>();
  double totalWeight = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> example : this.getCostParameters())
  {
    final Vector input = example.getInput();
    // negativeError = f(input) - target, computed in place on the evaluation result.
    final Vector negativeError = function.evaluate( input );
    negativeError.minusEquals( example.getOutput() );
    final double weight = DatasetUtil.getWeight(example);
    if (weight != 1.0)
    {
      negativeError.scaleEquals( weight );  // skip the no-op scale for unit weights
    }
    totalWeight += weight;
    // Chain rule: accumulate (weighted negative error) * dOutput/dParameters.
    final Matrix jacobian = function.computeParameterGradient( input );
    gradientSum.accumulate( negativeError.times( jacobian ) );
  }
  return new GradientPartialSSE( gradientSum.getSum(), totalWeight );
}

代码示例来源:origin: algorithmfoundry/Foundry

/**
 * Accumulates the weighted negative-error gradient over all cost parameters
 * and returns it, together with the weight sum, as a GradientPartialSSE.
 */
public Object computeParameterGradientPartial(
  GradientDescendable function )
{
  final RingAccumulator<Vector> accumulator =
    new RingAccumulator<Vector>();
  double weightSum = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> pair : this.getCostParameters())
  {
    final Vector input = pair.getInput();
    final Vector target = pair.getOutput();
    // Evaluate the function, then subtract the target in place: f(x) - y.
    final Vector residual = function.evaluate( input );
    residual.minusEquals( target );
    final double sampleWeight = DatasetUtil.getWeight(pair);
    weightSum += sampleWeight;
    if (sampleWeight != 1.0)
    {
      residual.scaleEquals( sampleWeight );
    }
    // Multiply the (weighted) residual by the parameter Jacobian and accumulate.
    accumulator.accumulate(
      residual.times( function.computeParameterGradient( input ) ) );
  }
  return new GradientPartialSSE( accumulator.getSum(), weightSum );
}

代码示例来源:origin: algorithmfoundry/Foundry

/**
 * Builds the partial sum-of-squared-error gradient: the accumulated
 * weighted residual-times-Jacobian vector plus the total example weight.
 */
public Object computeParameterGradientPartial(
  GradientDescendable function )
{
  final RingAccumulator<Vector> partialSum =
    new RingAccumulator<Vector>();
  double denominator = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> datum : this.getCostParameters())
  {
    final Vector x = datum.getInput();
    final Vector y = datum.getOutput();
    // In-place residual: f(x) - y.
    final Vector negError = function.evaluate( x );
    negError.minusEquals( y );
    final double w = DatasetUtil.getWeight(datum);
    if (w != 1.0)
    {
      negError.scaleEquals( w );
    }
    denominator += w;
    final Matrix dOutDParams = function.computeParameterGradient( x );
    partialSum.accumulate( negError.times( dOutDParams ) );
  }
  final Vector negativeSum = partialSum.getSum();
  return new GradientPartialSSE( negativeSum, denominator );
}

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-learning-core

/**
 * Computes the weight-normalized parameter gradient of the cost over
 * all cost parameters for the given function.
 */
public Vector computeParameterGradient(
  GradientDescendable function )
{
  final RingAccumulator<Vector> gradientSum =
    new RingAccumulator<Vector>();
  double totalWeight = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> example : this.getCostParameters())
  {
    final Vector input = example.getInput();
    // In-place residual: f(input) - target.
    final Vector negativeError = function.evaluate( input );
    negativeError.minusEquals( example.getOutput() );
    final double weight = DatasetUtil.getWeight(example);
    if (weight != 1.0)
    {
      negativeError.scaleEquals( weight );
    }
    totalWeight += weight;
    final Matrix jacobian = function.computeParameterGradient( input );
    gradientSum.accumulate( negativeError.times( jacobian ) );
  }
  final Vector result = gradientSum.getSum();
  // Normalize by the total weight unless it is zero (avoids division by zero).
  if (totalWeight != 0.0)
  {
    result.scaleEquals( 1.0 / totalWeight );
  }
  return result;
}

代码示例来源:origin: algorithmfoundry/Foundry

/**
 * Returns the parameter gradient averaged over the (weighted) cost
 * parameters; the sum is left unnormalized if the weights total zero.
 */
public Vector computeParameterGradient(
  GradientDescendable function )
{
  final RingAccumulator<Vector> accumulator =
    new RingAccumulator<Vector>();
  double weightSum = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> pair : this.getCostParameters())
  {
    final Vector input = pair.getInput();
    final Vector target = pair.getOutput();
    // residual = f(input) - target, computed in place.
    final Vector residual = function.evaluate( input );
    residual.minusEquals( target );
    final double sampleWeight = DatasetUtil.getWeight(pair);
    weightSum += sampleWeight;
    if (sampleWeight != 1.0)
    {
      residual.scaleEquals( sampleWeight );
    }
    accumulator.accumulate(
      residual.times( function.computeParameterGradient( input ) ) );
  }
  final Vector gradient = accumulator.getSum();
  if (weightSum != 0.0)
  {
    gradient.scaleEquals( 1.0 / weightSum );  // average over total weight
  }
  return gradient;
}

代码示例来源:origin: algorithmfoundry/Foundry

/**
 * Computes the cost gradient with respect to the function's parameters,
 * normalized by the sum of example weights when that sum is nonzero.
 */
public Vector computeParameterGradient(
  GradientDescendable function )
{
  final RingAccumulator<Vector> sum =
    new RingAccumulator<Vector>();
  double denominator = 0.0;
  for (InputOutputPair<? extends Vector, ? extends Vector> datum : this.getCostParameters())
  {
    final Vector x = datum.getInput();
    final Vector y = datum.getOutput();
    // negError = f(x) - y, mutating the freshly evaluated output vector.
    final Vector negError = function.evaluate( x );
    negError.minusEquals( y );
    final double w = DatasetUtil.getWeight(datum);
    if (w != 1.0)
    {
      negError.scaleEquals( w );
    }
    denominator += w;
    final Matrix dOutDParams = function.computeParameterGradient( x );
    sum.accumulate( negError.times( dOutDParams ) );
  }
  final Vector negativeSum = sum.getSum();
  if (denominator != 0.0)
  {
    negativeSum.scaleEquals( 1.0 / denominator );
  }
  return negativeSum;
}

代码示例来源:origin: algorithmfoundry/Foundry

// Forward-difference column estimate: apply the perturbed parameters p,
// then compute (f(input; p) - fx) / deltaSize and store it as column j of J.
function.convertFromVector( p );
Vector fjx = function.evaluate( input );
fjx.minusEquals( fx );
fjx.scaleEquals( 1.0 / deltaSize );
J.setColumn( j, fjx );

代码示例来源:origin: algorithmfoundry/Foundry

// Load the perturbed parameter vector p, evaluate, and form the finite
// difference (f - fx) / deltaSize in place; the result fills column j of J.
function.convertFromVector( p );
Vector fjx = function.evaluate( input );
fjx.minusEquals( fx );
fjx.scaleEquals( 1.0 / deltaSize );
J.setColumn( j, fjx );

代码示例来源:origin: gov.sandia.foundry/gov-sandia-cognition-learning-core

// One finite-difference step: set parameters to p, evaluate at input,
// subtract the baseline fx, scale by 1/deltaSize, and write column j of J.
function.convertFromVector( p );
Vector fjx = function.evaluate( input );
fjx.minusEquals( fx );
fjx.scaleEquals( 1.0 / deltaSize );
J.setColumn( j, fjx );

代码示例来源:origin: algorithmfoundry/Foundry

// In-place subtraction: delta -= lambda. (Snippet is truncated below;
// the if( n > 1 ) body is not shown on the source page.)
delta.minusEquals(lambda);
Matrix betahat = sampleCovariance;
if( n > 1 )

代码示例来源:origin: algorithmfoundry/Foundry

// Subtract lambda from delta in place. (Truncated excerpt; the body of
// the if( n > 1 ) branch is cut off in the original page.)
delta.minusEquals(lambda);
Matrix betahat = sampleCovariance;
if( n > 1 )

相关文章

微信公众号

最新文章

更多