org.apache.spark.mllib.linalg.Vector.apply(): usage and code examples


This article collects Java code examples of the org.apache.spark.mllib.linalg.Vector.apply() method and shows how Vector.apply() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. The details of Vector.apply() are as follows:

Package: org.apache.spark.mllib.linalg
Class: Vector
Method: apply

About Vector.apply

Vector.apply(int i) returns the element at index i as a double. The method is defined on the Vector abstraction, so it works for both DenseVector and SparseVector; for a SparseVector, an index that is not explicitly stored reads as 0.0.
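As a minimal standalone sketch (not drawn from the projects below), the following program reads elements from a dense and a sparse vector; Vectors.dense and Vectors.sparse are the standard MLlib factory methods:

import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;

public class VectorApplyDemo {
  public static void main(String[] args) {
    // Dense vector: all three elements are stored explicitly.
    Vector dense = Vectors.dense(1.0, 2.0, 3.0);
    System.out.println(dense.apply(1)); // 2.0

    // Sparse vector of size 5 with non-zeros at indices 0 and 3.
    Vector sparse = Vectors.sparse(5, new int[]{0, 3}, new double[]{4.0, 5.0});
    System.out.println(sparse.apply(3)); // 5.0
    System.out.println(sparse.apply(2)); // 0.0 -- unstored indices read as zero
  }
}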

Code examples

Example source: mahmoudparsian/data-algorithms-book

// Sum of squared per-element differences between two vectors of equal size
// (the squared Euclidean distance).
static double squaredDistance(Vector a, Vector b) {
  double distance = 0.0;
  int size = a.size();
  for (int i = 0; i < size; i++) {
    double diff = a.apply(i) - b.apply(i);
    distance += diff * diff;
  }
  return distance;
}
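Note that MLlib also ships an equivalent built-in since Spark 1.3, so in newer code the hand-rolled loop above can usually be replaced with Vectors.sqdist:

// Built-in squared Euclidean distance; handles dense and sparse vectors.
double d2 = Vectors.sqdist(a, b);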

Example source: mahmoudparsian/data-algorithms-book

// Element-wise sum of two vectors of equal size.
static Vector add(Vector a, Vector b) {
  double[] sum = new double[a.size()];
  for (int i = 0; i < sum.length; i++) {
    sum[i] = a.apply(i) + b.apply(i); // '=' rather than '+=': the array starts zeroed
  }
  return new DenseVector(sum);
}

Example source: mahmoudparsian/data-algorithms-book

static Vector average(List<Vector> list) {
  // sum all vectors component-wise (assumes a non-empty list of equal-sized vectors)
  double[] sum = new double[list.get(0).size()];
  for (Vector v : list) {
    for (int i = 0; i < sum.length; i++) {
      sum[i] += v.apply(i);
    }
  }
  // divide each component by the number of vectors
  int numOfVectors = list.size();
  for (int i = 0; i < sum.length; i++) {
    sum[i] = sum[i] / numOfVectors;
  }
  return new DenseVector(sum);
}

Example source: mahmoudparsian/data-algorithms-book

// Divide each component of a pre-summed vector by the number of vectors it aggregates.
static Vector average(Vector vec, Integer numVectors) {
  double[] avg = new double[vec.size()];
  for (int i = 0; i < avg.length; i++) {
    avg[i] = vec.apply(i) / ((double) numVectors);
  }
  return new DenseVector(avg);
}
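Taken together, these helpers implement the averaging step of a k-means-style loop. A hypothetical usage, not taken from the book, assuming java.util.Arrays and org.apache.spark.mllib.linalg.Vectors are imported:

List<Vector> points = Arrays.asList(
    Vectors.dense(1.0, 1.0),
    Vectors.dense(3.0, 3.0));
Vector centroid = average(points);                    // (2.0, 2.0)
double d2 = squaredDistance(points.get(0), centroid); // 2.0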

Example source: mahmoudparsian/data-algorithms-book

// Write the first three components of each centroid, one centroid per line.
for (Vector t : centroids) {
  outputWriter.write(t.apply(0) + " " + t.apply(1) + " " + t.apply(2) + "\n");
}
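The excerpt does not show how outputWriter is created or closed; a minimal sketch of the assumed surrounding setup, using java.io.BufferedWriter and java.io.FileWriter with a hypothetical file name:

// Assumed context (not shown in the excerpt): a plain text writer,
// closed automatically via try-with-resources.
try (BufferedWriter outputWriter = new BufferedWriter(new FileWriter("centroids.txt"))) {
  // ... the loop above writes one centroid per line ...
}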

Example source: ypriverol/spark-java8 — this project contains verbatim copies of the five data-algorithms-book examples above.

Example source: phuonglh/vn.vitk

if (k > 0) {
  // Score label k by summing the weights of the active (sparse) features;
  // the weight of feature j for label k is stored at index (k - 1) * d + j.
  for (int j : features.toSparse().indices()) {
    score[k] += weights.apply((k - 1) * d + j);
  }
  if (score[k] > maxScore) {
    maxScore = score[k];
    // ...
  }
}
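Here weights is a single flat Vector holding the weight rows of all labels back to back. A hypothetical helper (the name and layout are assumptions, not code from vn.vitk) that reads one label's full weight row under that layout:

// Hypothetical: extract row (k - 1) of a (K - 1) x d row-major weight matrix
// packed into a single Vector.
static double[] weightsForLabel(Vector weights, int k, int d) {
  double[] row = new double[d];
  for (int j = 0; j < d; j++) {
    row[j] = weights.apply((k - 1) * d + j);
  }
  return row;
}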

Example source: ddf-project/DDF

Vector vector = (Vector) sample;
if (mHasLabels) {
  // The label is stored as the last element; everything before it is a feature.
  label = vector.apply(vector.size() - 1);
  features = Arrays.copyOf(vector.toArray(), vector.size() - 1);
} else {
  // ...
}
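The resulting label/features pair is the usual input to MLlib training; a minimal sketch, assuming the surrounding code builds LabeledPoint training records:

import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;

LabeledPoint lp = new LabeledPoint(label, Vectors.dense(features));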

Example source: org.apache.spark/spark-mllib

@Test
public void tfIdf() {
 // The tests are to check Java compatibility.
 HashingTF tf = new HashingTF();
 @SuppressWarnings("unchecked")
 JavaRDD<List<String>> documents = jsc.parallelize(Arrays.asList(
  Arrays.asList("this is a sentence".split(" ")),
  Arrays.asList("this is another sentence".split(" ")),
  Arrays.asList("this is still a sentence".split(" "))), 2);
 JavaRDD<Vector> termFreqs = tf.transform(documents);
 termFreqs.collect();
 IDF idf = new IDF();
 JavaRDD<Vector> tfIdfs = idf.fit(termFreqs).transform(termFreqs);
 List<Vector> localTfIdfs = tfIdfs.collect();
 int indexOfThis = tf.indexOf("this");
 for (Vector v : localTfIdfs) {
  Assert.assertEquals(0.0, v.apply(indexOfThis), 1e-15);
 }
}

Example source: org.apache.spark/spark-mllib

@Test
public void tfIdfMinimumDocumentFrequency() {
 // The tests are to check Java compatibility.
 HashingTF tf = new HashingTF();
 @SuppressWarnings("unchecked")
 JavaRDD<List<String>> documents = jsc.parallelize(Arrays.asList(
  Arrays.asList("this is a sentence".split(" ")),
  Arrays.asList("this is another sentence".split(" ")),
  Arrays.asList("this is still a sentence".split(" "))), 2);
 JavaRDD<Vector> termFreqs = tf.transform(documents);
 termFreqs.collect();
 IDF idf = new IDF(2);
 JavaRDD<Vector> tfIdfs = idf.fit(termFreqs).transform(termFreqs);
 List<Vector> localTfIdfs = tfIdfs.collect();
 int indexOfThis = tf.indexOf("this");
 for (Vector v : localTfIdfs) {
  Assert.assertEquals(0.0, v.apply(indexOfThis), 1e-15);
 }
}

Example sources: org.apache.spark/spark-mllib_2.11 and org.apache.spark/spark-mllib_2.10 — the Scala-2.11 and Scala-2.10 artifacts contain verbatim copies of the two tests above.

Example source: locationtech/geowave

// Assign the centroid to a cluster and unpack its (lon, lat, time) components.
final int index = clusterModel.predict(center);
final double lon = center.apply(0);
final double lat = center.apply(1);
final double timeVal = center.apply(2);
