org.apache.hadoop.io.WritableFactories Java Examples
The following examples show how to use org.apache.hadoop.io.WritableFactories.
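WritableFactories lets a Writable class register a factory for itself, so that WritableFactories.newInstance(clazz) can construct instances through the factory, falling back to reflection when no factory is registered. The following minimal sketch shows the registration pattern; the MyWritable class and its value field are illustrative, not taken from any of the projects below.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;

public class MyWritable implements Writable {

    static {
        // register a factory so WritableFactories.newInstance can create
        // instances without relying on reflection alone
        WritableFactories.setFactory(MyWritable.class, new WritableFactory() {
            @Override
            public Writable newInstance() {
                return new MyWritable();
            }
        });
    }

    private int value;

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeInt(value);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        value = in.readInt();
    }

    public static MyWritable read(DataInput in) throws IOException {
        MyWritable w = (MyWritable) WritableFactories.newInstance(MyWritable.class);
        w.readFields(in);
        return w;
    }
}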
Example #1
Source File: HadoopInputSplit.java (from Flink-CEPplus, Apache License 2.0)
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (hadoopInputSplit instanceof Configurable) {
        ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
    hadoopInputSplit.readFields(in);
}
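The write path has to mirror the read order above: Java serialization for the parent fields, then the JobConf, then the raw split. The following writeObject is a sketch inferred from that read order, not the project's verbatim code.

private void writeObject(ObjectOutputStream out) throws IOException {
    // write the parent fields and the final fields
    out.defaultWriteObject();

    // the job conf knows how to serialize itself
    jobConf.write(out);

    // the split writes its own fields last
    hadoopInputSplit.write(out);
}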
Example #2
Source File: HadoopInputSplit.java (from flink, Apache License 2.0)
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    // the job conf knows how to deserialize itself
    jobConf = new JobConf();
    jobConf.readFields(in);

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (hadoopInputSplit instanceof Configurable) {
        ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
        ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
    hadoopInputSplit.readFields(in);
}
Example #3
Source File: SpreadSheetCellDAO.java (from hadoopoffice, Apache License 2.0)
@Override
public void readFields(DataInput dataInput) throws IOException {
    // each field was serialized as a Text; read them back in write order
    Text formattedValueText = (Text) WritableFactories.newInstance(Text.class);
    formattedValueText.readFields(dataInput);
    this.formattedValue = formattedValueText.toString();
    Text commentText = (Text) WritableFactories.newInstance(Text.class);
    commentText.readFields(dataInput);
    this.comment = commentText.toString();
    Text formulaText = (Text) WritableFactories.newInstance(Text.class);
    formulaText.readFields(dataInput);
    this.formula = formulaText.toString();
    Text addressText = (Text) WritableFactories.newInstance(Text.class);
    addressText.readFields(dataInput);
    this.address = addressText.toString();
    Text sheetNameText = (Text) WritableFactories.newInstance(Text.class);
    sheetNameText.readFields(dataInput);
    this.sheetName = sheetNameText.toString();
}
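Since readFields consumes five Text values in a fixed order, the matching write method must emit them in the same order. A sketch of what that counterpart could look like (assumed, not the project's actual code):

@Override
public void write(DataOutput dataOutput) throws IOException {
    // emit the five fields as Text, in the order readFields reads them
    new Text(this.formattedValue).write(dataOutput);
    new Text(this.comment).write(dataOutput);
    new Text(this.formula).write(dataOutput);
    new Text(this.address).write(dataOutput);
    new Text(this.sheetName).write(dataOutput);
}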
Example #4
Source File: HadoopInputSplit.java (from flink, Apache License 2.0)
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    try {
        hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
    }

    if (needsJobConf(hadoopInputSplit)) {
        // the job conf knows how to deserialize itself
        jobConf = new JobConf();
        jobConf.readFields(in);

        if (hadoopInputSplit instanceof Configurable) {
            ((Configurable) hadoopInputSplit).setConf(this.jobConf);
        } else if (hadoopInputSplit instanceof JobConfigurable) {
            ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
        }
    }

    hadoopInputSplit.readFields(in);
}
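Here the JobConf is only deserialized for splits that can consume one. The needsJobConf helper is not shown in this snippet; presumably it checks the same two interfaces used above, roughly:

private static boolean needsJobConf(Object hadoopInputSplit) {
    // only Configurable/JobConfigurable splits receive a JobConf,
    // so only those need one serialized alongside them
    return hadoopInputSplit instanceof Configurable
            || hadoopInputSplit instanceof JobConfigurable;
}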
Example #5
Source File: HadoopInputSplitWrapper.java (from stratosphere, Apache License 2.0)
@Override
public void read(DataInput in) throws IOException {
    this.splitNumber = in.readInt();
    this.hadoopInputSplitTypeName = in.readUTF();
    if (hadoopInputSplit == null) {
        try {
            // lazily instantiate the wrapped split from the class name written alongside it
            Class<? extends org.apache.hadoop.io.Writable> inputSplit =
                    Class.forName(hadoopInputSplitTypeName).asSubclass(org.apache.hadoop.io.Writable.class);
            this.hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(inputSplit);
        } catch (Exception e) {
            throw new RuntimeException("Unable to create InputSplit", e);
        }
    }
    this.hadoopInputSplit.readFields(in);
}
Example #6
Source File: HadoopInputSplit.java (from Flink-CEPplus, Apache License 2.0)
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    try {
        Class<? extends Writable> writableSplit = splitType.asSubclass(Writable.class);
        mapreduceInputSplit = (org.apache.hadoop.mapreduce.InputSplit) WritableFactories.newInstance(writableSplit);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate the Hadoop InputSplit", e);
    }

    ((Writable) mapreduceInputSplit).readFields(in);
}
Example #7
Source File: HadoopInputSplit.java (from flink, Apache License 2.0)
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
    // read the parent fields and the final fields
    in.defaultReadObject();

    try {
        Class<? extends Writable> writableSplit = splitType.asSubclass(Writable.class);
        mapreduceInputSplit = (org.apache.hadoop.mapreduce.InputSplit) WritableFactories.newInstance(writableSplit);
    } catch (Exception e) {
        throw new RuntimeException("Unable to instantiate the Hadoop InputSplit", e);
    }

    ((Writable) mapreduceInputSplit).readFields(in);
}
Example #8
Source File: FileReducerViolationBean.java (from jumbune, GNU Lesser General Public License v3.0)
public void readFields(DataInput in) throws IOException {
    fileViolation.clear();
    // the map was written as an entry count followed by key/value pairs
    int entries = in.readInt();
    for (int i = 0; i < entries; i++) {
        Writable key = WritableFactories.newInstance(Text.class);
        key.readFields(in);
        Writable value = WritableFactories.newInstance(TotalReducerViolationBean.class);
        value.readFields(in);
        fileViolation.put(key, value);
    }
    count.readFields(in);
}
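The read side expects an int entry count followed by alternating key/value Writables, then the count field. A matching write method would look roughly like this (a sketch assuming fileViolation is a java.util.Map<Writable, Writable>; not the project's actual code):

public void write(DataOutput out) throws IOException {
    // entry count first, then each key/value pair in read order
    out.writeInt(fileViolation.size());
    for (Map.Entry<Writable, Writable> entry : fileViolation.entrySet()) {
        entry.getKey().write(out);
        entry.getValue().write(out);
    }
    count.write(out);
}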
Example #9
Source File: JsonLineViolationBean.java (from jumbune, GNU Lesser General Public License v3.0)
public void readFields(DataInput in) throws IOException {
    lineNumber.readFields(in);
    // the list was written as a size followed by its elements
    int listSize = in.readInt();
    for (int i = 0; i < listSize; i++) {
        Writable value = WritableFactories.newInstance(JsonKeyViolationBean.class);
        value.readFields(in);
        jsonKeyViolationList.add((JsonKeyViolationBean) value);
    }
}