MapReduce program

b4lqfgs4 posted on 2021-06-03 in Hadoop

I am trying a Java MapReduce exercise. When I compile the code with the command below, I get some errors, which are listed after it. Please help me. Thanks in advance.
Source code

package cvkumar.hadoopmr;
import java.io.IOException;
import java.util.StrinTokenizer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class Dictionary 
{
    public static class WordMapper extends Mapper <Text, Text, Text, Text>
    {
        private Text word = new Text();
        public void map(Text key, Text value, Context context)
            throws IOException, InterruptedException
        {
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            while (itr.hasMoreTokens())
            {
                word.set(itr.nextToken());
                context.write(key,word);
            }
        }
    }
    public static class AllTranslationsReducer
        extends Reducer<Text,Text,Text,Text>
    {
            private Text result = new Text();
            public void reduce(Text key, Iterable<Text> values,Context context)
         throws IOException, InterruptedException
            {
                String translations = "";
                for (Text val : values)
                {
                    translations += "|"+val.toString();
                }
                result.set(translations);
                context.write(key, result);
            }
        }

    public static void main(String[] args) throws Exception
    {
            Configuration conf = new Configuration();
            Job job = new Job(conf, "dictionary");
            job.setJarByClass(Dictionary.class);
            job.setMapperClass(WordMapper.class);
            job.setReducerClass(AllTranslationsReducer.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setInputFormatClass(KeyValueTextInputFormat.class);
            //FileInputFormat.addInputPath(job, new Path("/tmp/hadoop-cscarioni/dfs/name/file"));
            //FileOutputFormat.setOutputPath(job, new Path("output"));
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }
}

Errors

hadoop@hadoop-Vostro1310:~/hadoop-1.2.1$ javac -classpath hadoop-core-1.2.1.jar -d ./Dictionary ./cvkumar/hadoopmr/Dictionary.java 
./cvkumar/hadoopmr/Dictionary.java:3: cannot find symbol
symbol  : class StrinTokenizer
location: package java.util
import java.util.StrinTokenizer;
                ^
./cvkumar/hadoopmr/Dictionary.java:15: cannot find symbol
symbol  : class KeyValueTextInputForm
location: package org.apache.hadoop.mapreduce.lib.input
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputForm;
                                            ^
./cvkumar/hadoopmr/Dictionary.java:16: cannot find symbol
symbol  : class FileOutFormat
location: package org.apache.hadoop.mapreduce.lib.output
import org.apache.hadoop.mapreduce.lib.output.FileOutFormat;
                                             ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
            ^
./cvkumar/hadoopmr/Dictionary.java:27: cannot find symbol
symbol  : class StringTokenizer
location: class cvkumar.hadoopmr.Dictionary.WordMapper
            StringTokenizer itr = new StringTokenizer(value.toString(),",");
                                      ^
./cvkumar/hadoopmr/Dictionary.java:61: setInputFormatClass(java.lang.Class<? extends org.apache.hadoop.mapreduce.InputFormat>) in org.apache.hadoop.mapreduce.Job cannot be applied to (java.lang.Class<org.apache.hadoop.mapred.KeyValueTextInputFormat>)
            job.setInputFormatClass(KeyValueTextInputFormat.class);
               ^
./cvkumar/hadoopmr/Dictionary.java:65: setOutputPath(org.apache.hadoop.mapred.JobConf,org.apache.hadoop.fs.Path) in org.apache.hadoop.mapred.FileOutputFormat cannot be applied to (org.apache.hadoop.mapreduce.Job,org.apache.hadoop.fs.Path)
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
                        ^
7 errors

wqsoz72f1#

The answer is already there, provided by the Java compiler. Change the following lines:
Line 3:

import java.util.StringTokenizer;

Line 15:

import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;

Line 16:

import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

Tip: if you are using an IDE such as Eclipse or NetBeans, these Java compile errors would already be highlighted, together with hints on how to resolve them. If you are not using an IDE, I strongly recommend that you start! Since you are writing MapReduce programs, I suggest Eclipse, for which a Hadoop plugin is available.
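
For completeness: the remaining errors trace back to the same imports. The two StringTokenizer errors at line 27 disappear once the misspelled import on line 3 is fixed, and the last two errors (lines 61 and 65) occur because, while the misspelled new-API imports fail, KeyValueTextInputFormat and FileOutputFormat get resolved from the old-API wildcard import org.apache.hadoop.mapred.*, which is incompatible with org.apache.hadoop.mapreduce.Job. Fixing the spelling should already resolve this, since an explicit single-type import takes precedence over a wildcard import, but dropping the old-API wildcards keeps the file unambiguous. Below is a minimal sketch of a cleaned-up import section, assuming the program is meant to use only the new org.apache.hadoop.mapreduce API (the class body stays exactly as posted):

package cvkumar.hadoopmr;

import java.io.IOException;
import java.util.StringTokenizer;  // was misspelled as StrinTokenizer

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;  // was misspelled as KeyValueTextInputForm
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;        // was misspelled as FileOutFormat

// The old-API wildcard imports (org.apache.hadoop.mapred.* and the other .* imports),
// the duplicate Path import, and the unused IntWritable/GenericOptionsParser imports
// are dropped here.

// ... public class Dictionary { ... } unchanged ...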
