No output values in Java MapReduce programming

n8ghc7c1 · posted on 2021-06-01 in Hadoop
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.text.ParseException;
import java.text.SimpleDateFormat;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class StubMapper extends Mapper<LongWritable, Text, Text, MinMaxCountTuple> {

    private Text outUserId = new Text();
    private MinMaxCountTuple outTuple = new MinMaxCountTuple();

    private final static SimpleDateFormat frmt = 
            new SimpleDateFormat("yyyy-MM--dd'T'HH:mm:ss.SSS");

//  public static HashMap<String, String> getMapFromCSV(String filePath) throws IOException
//  {
//      
//      HashMap<String, String> words = new HashMap<String, String>();
//      
//      /*BufferedReader in = new BufferedReader(new FileReader(filePath));
//
//      String line;
//      //= in.readLine())
//        while ((line = in.readLine()) != null) {
//            String columns[] = line.split(",");
//            if (!words.containsKey(columns[1])) {
//                words.put(columns[1], columns[6]);
//            }
//
//        }
//        
//        return words;
//        
//        */
//
//
//
//      String line=filePath;
//      
//      while(line!=null){
//          
//          String columns[] = line.split(",");
//          if (columns.length>6){
//            if (!words.containsKey(columns[1])) {
//                words.put(columns[1], columns[6]);
//            } 
//          }
//          
//      }
//      return words;
//  }

    @Override
    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {

//      HashMap<String, String> parsed = getMapFromCSV(value.toString());
//      String columns[] = value.toString().split("\t");
//      String strDate = parsed.get("CheckoutDateTime");
//      String userId = columns[1];
//      String strDate = columns[6];
//      String userId = parsed.get("BibNumber");

        // Skip the CSV header line.
        if (value.toString().startsWith("BibNumber")) {
            return;
        }

        String data[] = value.toString().split(",", -1);
        String userId = data[0];
        String dateTime = data[5];

        try {
            Date creationDate = frmt.parse(dateTime);

            outTuple.setMin(creationDate);
            outTuple.setMax(creationDate);
            outTuple.setCount(1);

            outUserId.set(userId);

            context.write(outUserId, outTuple);
        } catch (ParseException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
}

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.hadoop.io.Writable;

public class MinMaxCountTuple implements Writable{

    private Date min = new Date();
    private Date max = new Date();
    private long count = 0;

    private final static SimpleDateFormat frmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");

    public Date getMin()
    {
        return min;
    }

    public void setMin(Date min)
    {
        this.min = min;
    }

    public Date getMax()
    {
        return max;
    }

    public void setMax(Date max)
    {
        this.max = max;
    }

    public long getCount()
    {
        return count;
    }

    public void setCount(long count)
    {
        this.count = count;
    }

    @Override
    public void write(DataOutput out) throws IOException {
// Serialize the two dates as epoch milliseconds, then the count.
        out.writeLong(min.getTime());
        out.writeLong(max.getTime());
        out.writeLong(count);
    }

    public String toString()
    {
        return frmt.format(min) + "\t" + frmt.format(max) + "\t" + count;
    }

    @Override
    public void readFields(DataInput in) throws IOException {
// Deserialize the fields in the same order they were written.
        min = new Date(in.readLong());
        max = new Date(in.readLong());
        count = in.readLong();
    }

}

These two classes are my mapper and the MinMaxCountTuple type, used to find the maximum CheckoutDateTime. Basically, what I want as output is, for each user, the dates on which books are mostly checked out, so I take the user id and the CheckoutDateTime from the CSV file as the key and value. The job runs without errors, but the problem is that the map input counter shows the size of the data while the map output is a zero-size file, which means the mapper produced no output from the input. I can't tell which part is wrong. I have attached a screenshot of my CSV file. I would appreciate any pointers. Thanks. If you need more information about my code, just let me know and I will post more.

18/03/30 01:38:41 INFO mapred.JobClient:     Map input records=3794727
18/03/30 01:38:41 INFO mapred.JobClient:     Map output records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Map output bytes=0
18/03/30 01:38:41 INFO mapred.JobClient:     Input split bytes=416
18/03/30 01:38:41 INFO mapred.JobClient:     Combine input records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Combine output records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce input groups=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce shuffle bytes=24
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce input records=0
18/03/30 01:38:41 INFO mapred.JobClient:     Reduce output records=0

(Screenshot of the CSV file referenced above.)
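The post does not show the reducer side of this min/max/count pattern. For reference, a minimal sketch of how the tuples emitted by the mapper would typically be folded into a single min/max/count per user id could look like the following; the class name and structure below are assumptions, not code from the original post.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical reducer sketch: folds the per-record tuples emitted by the
// mapper into one min/max/count tuple per user id.
public class MinMaxCountReducer extends Reducer<Text, MinMaxCountTuple, Text, MinMaxCountTuple> {

    private MinMaxCountTuple result = new MinMaxCountTuple();

    @Override
    public void reduce(Text key, Iterable<MinMaxCountTuple> values, Context context)
            throws IOException, InterruptedException {

        result.setMin(null);
        result.setMax(null);
        long sum = 0;

        for (MinMaxCountTuple val : values) {
            // Keep the earliest date seen so far.
            if (result.getMin() == null || val.getMin().compareTo(result.getMin()) < 0) {
                result.setMin(val.getMin());
            }
            // Keep the latest date seen so far.
            if (result.getMax() == null || val.getMax().compareTo(result.getMax()) > 0) {
                result.setMax(val.getMax());
            }
            sum += val.getCount();
        }

        result.setCount(sum);
        context.write(key, result);
    }
}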

nuypyhwy1#

The mapper code looks fine. Did you explicitly set the map output key and value classes in the driver?

job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(MinMaxCountTuple.class);

If they are not set in the driver, try adding them.
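For context, here is a minimal driver sketch showing where those two calls go, assuming the Hadoop 2.x mapreduce API; the driver class name, job name, reducer placeholder, and input/output paths are assumptions, not taken from the original post.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical driver sketch; adjust class names and paths to your project.
public class MinMaxCountDriver {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "checkout min/max count");

        job.setJarByClass(MinMaxCountDriver.class);
        job.setMapperClass(StubMapper.class);
        // job.setReducerClass(...);  // set your reducer here if you have one

        // Explicitly declare the mapper's output types, as suggested above.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(MinMaxCountTuple.class);

        // Final output types written at the end of the job.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(MinMaxCountTuple.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

Note that when the map output classes are not set explicitly, the framework falls back to the job's output key/value classes, so declaring them avoids type mismatches when the mapper emits a custom Writable such as MinMaxCountTuple.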
