
Here is an example of a Hadoop Map class [1] that extends the Mapper class, and [3] is Hadoop's Mapper class itself. I want to extend a class that in turn extends Hadoop's Mapper.

I want to create MyExampleMapper, which extends ExampleMapper, which itself extends Hadoop's Mapper [2]. The reason is that I want to set properties in ExampleMapper, so that when I create mappers like MyExampleMapper that extend it, I don't have to set those properties myself. Is this possible?

[1] ExampleMapper

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExampleMapper
        extends Mapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());
        while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            context.write(word, one);
        }
    }
}

[2] What I want

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyExampleMapper
        extends ExampleMapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());

        String result = System.getProperty("job.examplemapper");

        if (result.equals("true")) {
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }
}


public class ExampleMapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT>
        extends Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {

    // The property should be set here, once, so that every subclass inherits it.
    {
        System.setProperty("job.examplemapper", "true");
    }
}

[3] This is Hadoop's Mapper class

public class Mapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> { 
    public Mapper() { 
    } 

    protected void setup(Mapper.Context context) throws IOException, InterruptedException { 
    } 

    protected void map(KEYIN key, VALUEIN value, Mapper.Context context) throws IOException, InterruptedException { 
     context.write(key, value); 
    } 

    protected void cleanup(Mapper.Context context) throws IOException, InterruptedException { 
    } 

    public void run(Mapper.Context context) throws IOException, InterruptedException { 
     this.setup(context); 

     try { 
      while(context.nextKeyValue()) { 
       this.map(context.getCurrentKey(), context.getCurrentValue(), context); 
      } 
     } finally { 
      this.cleanup(context); 
     } 

    } 

    public class Context extends MapContext<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
        public Context(Configuration conf, TaskAttemptID taskid, RecordReader<KEYIN, VALUEIN> reader, RecordWriter<KEYOUT, VALUEOUT> writer, OutputCommitter committer, StatusReporter reporter, InputSplit split) throws IOException, InterruptedException {
            super(conf, taskid, reader, writer, committer, reporter, split);
        }
    }
} 

Answer

import org.apache.hadoop.mapreduce.Mapper;

public class ExampleMapper<T, X, Y, Z> extends Mapper<T, X, Y, Z> {
    // Runs once, when the class is first loaded, before any subclass is used.
    static {
        System.setProperty("job.examplemapper", "true");
    }
}
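Because the static initializer belongs to the superclass, it runs as soon as ExampleMapper is loaded, which happens before any subclass such as MyExampleMapper is initialized. The property is therefore already set in the JVM that executes the map task, and no subclass has to set it itself.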

Then extend it in your program:

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class MyExampleMapper
        extends ExampleMapper<Object, Text, Text, IntWritable> {

    private final static IntWritable one = new IntWritable(1);
    private Text word = new Text();

    public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
        StringTokenizer itr = new StringTokenizer(value.toString());

        String result = System.getProperty("job.examplemapper");

        if (result.equals("true")) {
            while (itr.hasMoreTokens()) {
                word.set(itr.nextToken());
                context.write(word, one);
            }
        }
    }
}
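
For completeness, here is a minimal driver sketch showing how the extended mapper could be wired into a job. The driver class name MyExampleDriver, the job name, and the use of args[0]/args[1] as input and output paths are assumptions for illustration; the Job, FileInputFormat, and FileOutputFormat calls are the standard org.apache.hadoop.mapreduce API. Note that the driver never touches job.examplemapper, since the property is set by ExampleMapper's static initializer when the mapper class is loaded.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// A minimal sketch; class name, job name, and argument handling are illustrative assumptions.
public class MyExampleDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "my example job");
        job.setJarByClass(MyExampleDriver.class);
        // Only the concrete mapper is registered; no extra property setup is needed here.
        job.setMapperClass(MyExampleMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}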