cassandra hadoop mapreduce:java.lang.classcastexception:java.util.hashmap不能转换为java.nio.bytebuffer

flvlnr44  于 2021-06-03  发布在  Hadoop
关注(0)|答案(1)|浏览(378)

我正在尝试用apache cassandra创建一个mapreduce作业。输入数据来自cassandra,输出数据也写入cassandra。
程序尝试从名为tweetstore的表中选择所有数据,然后插入包含用户名的行数。
这是mapreduce作业的主类:

package com.cassandra.hadoop;
import java.io.*;
import java.lang.*;
import java.util.*;
import java.nio.ByteBuffer;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.cassandra.hadoop.ColumnFamilyInputFormat;
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat;
import org.apache.cassandra.hadoop.ConfigHelper;
import org.apache.cassandra.hadoop.cql3.*;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.utils.ByteBufferUtil;

/**
 * Driver for a MapReduce job that reads all rows from the Cassandra
 * column family {@code tweetstore}, counts rows per user in
 * {@link TweetMapper}/{@link TweetAggregator}, and writes the counts
 * back to the column family {@code tweetcount}.
 *
 * NOTE: ColumnFamilyOutputFormat requires the reducer to emit
 * (ByteBuffer rowKey, List&lt;Mutation&gt;) pairs — the output classes
 * configured here must match the reducer's generic type arguments.
 */
public class App
{
static final String KEYSPACE_NAME = "tweet_cassandra_map_reduce";
static final String INPUT_COLUMN_FAMILY = "tweetstore";
static final String OUTPUT_COLUMN_FAMILY = "tweetcount";
static final String COLUMN_NAME = "user";

/**
 * Configures and submits the job.
 *
 * @param args optional first argument: the user name to filter on
 *             (defaults to "mevivs" when absent or blank)
 */
public static void main( String[] args ) throws IOException, InterruptedException, ClassNotFoundException
{
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    Job job = new Job(conf, "tweet count");
    job.setJarByClass(App.class);

    // Mapper configuration: emits (Text user, IntWritable 1) per row.
    job.setMapperClass(TweetMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setInputFormatClass(ColumnFamilyInputFormat.class);

    // Reducer configuration: ColumnFamilyOutputFormat expects the key to be
    // the row key as a ByteBuffer and the value to be a List of Mutations.
    job.setReducerClass(TweetAggregator.class);
    job.setOutputKeyClass(ByteBuffer.class);
    job.setOutputValueClass(List.class);
    job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

    // Cassandra input column family configuration (thrift RPC transport).
    ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
    ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
    ConfigHelper.setInputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
    ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, INPUT_COLUMN_FAMILY);

    // Request every column of each row (open-ended slice).
    SlicePredicate slicePredicate = new SlicePredicate();
    slicePredicate.setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, Integer.MAX_VALUE));

    // Secondary-index filter: user == <first CLI arg, or "mevivs">.
    // NOTE(review): the job log shows "ignoring jobKeyRange specified without
    // start_key" — setInputRange without a start key may be ignored by this
    // Cassandra version; verify the filter is actually applied server-side.
    IndexExpression ixpr = new IndexExpression();
    ixpr.setColumn_name(ByteBufferUtil.bytes(COLUMN_NAME));
    ixpr.setOp(IndexOperator.EQ);
    ixpr.setValue(ByteBufferUtil.bytes(otherArgs.length > 0 && !StringUtils.isBlank(otherArgs[0]) ? otherArgs[0] : "mevivs"));

    List<IndexExpression> ixpressions = new ArrayList<IndexExpression>();
    ixpressions.add(ixpr);
    ConfigHelper.setInputRange(job.getConfiguration(), ixpressions);
    ConfigHelper.setInputSlicePredicate(job.getConfiguration(), slicePredicate);

    // Cassandra output column family configuration.
    ConfigHelper.setOutputRpcPort(job.getConfiguration(), "9160");
    ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
    ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
    ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE_NAME, OUTPUT_COLUMN_FAMILY);
    job.getConfiguration().set("row_key", "key");

    job.waitForCompletion(true);
}
}

Map程序代码

package com.cassandra.hadoop;
import java.io.*;
import java.lang.*;
import java.util.*;
import java.nio.ByteBuffer;
import java.util.SortedMap;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.cassandra.db.Column;
import org.apache.cassandra.utils.ByteBufferUtil;

public class TweetMapper extends Mapper<ByteBuffer, SortedMap<ByteBuffer, Column>, Text, IntWritable>
{
static final String COLUMN_NAME = App.COLUMN_NAME;
private final static IntWritable one = new IntWritable(1);

/* (non-Javadoc)

* @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, org.apache.hadoop.mapreduce.

Mapper.Context)

* /

public void map(ByteBuffer key, SortedMap<ByteBuffer, Column> columns, Context context) throws IOException, InterruptedException
{
    Column column = columns.get(ByteBufferUtil.bytes(COLUMN_NAME));
    String value = ByteBufferUtil.string(column.value());
    context.write(new Text(value), one);
}
}

减速器代码:

package com.cassandra.hadoop;
import java.io.IOException;
import java.util.*;
import java.lang.*;
import java.io.*;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.db.marshal.Int32Type;

public class TweetAggregator extends Reducer<Text,IntWritable, Map<String,ByteBuffer>, List<ByteBuffer>>
{
private static Map<String,ByteBuffer> keys = new HashMap<>();
public void reduce(Text word, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException
{
    int sum = 0;
    for (IntWritable val : values)
    sum += val.get();
    System.out.println("writing");
    keys.put("key", ByteBufferUtil.bytes(word.toString()));
    context.write(keys, getBindVariables(word, sum));
}

private List<ByteBuffer> getBindVariables(Text word, int sum)
{
    List<ByteBuffer> variables = new ArrayList<ByteBuffer>();
    variables.add(Int32Type.instance.decompose(sum));
    return variables;
}
 }

当我试图用 hadoop 命令运行该作业时,在 reduce 步骤出现以下错误:

15/02/14 16:53:13 WARN hadoop.AbstractColumnFamilyInputFormat: ignoring  jobKeyRange specified without start_key
15/02/14 16:53:14 INFO mapred.JobClient: Running job: job_201502141652_0001
15/02/14 16:53:15 INFO mapred.JobClient:  map 0% reduce 0%
15/02/14 16:53:20 INFO mapred.JobClient:  map 66% reduce 0%
15/02/14 16:53:22 INFO mapred.JobClient:  map 100% reduce 0%
15/02/14 16:53:28 INFO mapred.JobClient:  map 100% reduce 33%
15/02/14 16:53:30 INFO mapred.JobClient: Task Id : attempt_201502141652_0001_r_000000_0, Status : FAILED
java.lang.ClassCastException: java.util.HashMap cannot be cast to java.nio.ByteBuffer
at org.apache.cassandra.hadoop.ColumnFamilyRecordWriter.write(ColumnFamilyRecordWriter.java:50)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:588)
at org.apache.hadoop.mapreduce.TaskInputOutputContext.write(TaskInputOutputContext.java:80)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:40)
at com.cassandra.hadoop.TweetAggregator.reduce(TweetAggregator.java:20)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:176)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:650)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:418)
at org.apache.hadoop.mapred.Child$4.run(Child.java:255)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1136)
at org.apache.hadoop.mapred.Child.main(Child.java:249)
attempt_201502141652_0001_r_000000_0: writing

请帮忙!!谢谢

j1dl9f46

j1dl9f461#

看起来减速机的作业设置将bytebuffer作为输出,而不是Map。尝试在作业设置中更改此设置

job.setOutputKeyClass(ByteBuffer.class);

为了这个

job.setOutputKeyClass(Map.class);  // 注意:类字面量不能带泛型参数,只能写原始类型 Map.class

不管怎样,通过 job.set…… 配置的输出类型需要与 Mapper 和 Reducer 声明中的泛型类型参数保持一致,因此请检查以确保它们对齐。

相关问题