Java Code Examples for org.apache.hadoop.mapreduce.InputSplit

The following examples show how to use org.apache.hadoop.mapreduce.InputSplit. They are extracted from open source projects; the originating project, source file, and license are listed above each example.
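For orientation: org.apache.hadoop.mapreduce.InputSplit is an abstract class, and the examples below either create concrete splits (in getSplits) or consume them (in createRecordReader/initialize). A subclass reports its size via getLength() and its preferred hosts via getLocations(); getLocationInfo() can additionally indicate whether a host holds the data in memory or on disk (Example 3 exercises this). Splits that are shipped to tasks also need to be serializable, typically by implementing org.apache.hadoop.io.Writable. Below is a minimal sketch of such a split; the class name and fields are illustrative only and do not come from any of the projects quoted on this page.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;

// Minimal illustrative split carrying a byte count and the preferred hosts.
public class ExampleSplit extends InputSplit implements Writable {
  private long length;
  private String[] hosts;

  public ExampleSplit() {                       // no-arg constructor for deserialization
    this(0L, new String[0]);
  }

  public ExampleSplit(long length, String[] hosts) {
    this.length = length;
    this.hosts = hosts;
  }

  @Override
  public long getLength() {                     // size in bytes, used to order splits
    return length;
  }

  @Override
  public String[] getLocations() {              // data-locality hints; may be empty
    return hosts;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeLong(length);
    out.writeInt(hosts.length);
    for (String host : hosts) {
      Text.writeString(out, host);
    }
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    length = in.readLong();
    hosts = new String[in.readInt()];
    for (int i = 0; i < hosts.length; i++) {
      hosts[i] = Text.readString(in);
    }
  }
}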
Example 1
Source Project: hadoop   Source File: GenerateData.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobCtxt) throws IOException {
  final JobClient client =
    new JobClient(new JobConf(jobCtxt.getConfiguration()));
  ClusterStatus stat = client.getClusterStatus(true);
  final long toGen =
    jobCtxt.getConfiguration().getLong(GRIDMIX_GEN_BYTES, -1);
  if (toGen < 0) {
    throw new IOException("Invalid/missing generation bytes: " + toGen);
  }
  final int nTrackers = stat.getTaskTrackers();
  final long bytesPerTracker = toGen / nTrackers;
  final ArrayList<InputSplit> splits = new ArrayList<InputSplit>(nTrackers);
  final Pattern trackerPattern = Pattern.compile("tracker_([^:]*):.*");
  final Matcher m = trackerPattern.matcher("");
  for (String tracker : stat.getActiveTrackerNames()) {
    m.reset(tracker);
    if (!m.find()) {
      System.err.println("Skipping node: " + tracker);
      continue;
    }
    final String name = m.group(1);
    splits.add(new GenSplit(bytesPerTracker, new String[] { name }));
  }
  return splits;
}
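
The loop above creates one split per active task tracker and uses the tracker's host name as the split's only location hint. Assuming a tracker is reported under a name such as tracker_node-7.example.com:localhost/127.0.0.1:51234 (the host is made up for illustration), the regex pulls out just the host part:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

Matcher m = Pattern.compile("tracker_([^:]*):.*")
    .matcher("tracker_node-7.example.com:localhost/127.0.0.1:51234");
if (m.find()) {
  System.out.println(m.group(1));   // prints "node-7.example.com"
}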
 
Example 2
Source Project: accumulo-examples   Source File: TeraSortIngest.java    License: Apache License 2.0
/**
 * Create the desired number of splits, dividing the number of rows between the mappers.
 */
@Override
public List<InputSplit> getSplits(JobContext job) {
  long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
  int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
  long rowsPerSplit = totalRows / numSplits;
  log.info(
      "Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
  ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
  long currentRow = 0;
  for (int split = 0; split < numSplits - 1; ++split) {
    splits.add(new RangeInputSplit(currentRow, rowsPerSplit));
    currentRow += rowsPerSplit;
  }
  splits.add(new RangeInputSplit(currentRow, totalRows - currentRow));
  log.info("Done Generating.");
  return splits;
}
 
Example 3
Source Project: big-c   Source File: TestFileInputFormat.java    License: Apache License 2.0
@Test
public void testSplitLocationInfo() throws Exception {
  Configuration conf = getConfiguration();
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      "test:///a1/a2");
  Job job = Job.getInstance(conf);
  TextInputFormat fileInputFormat = new TextInputFormat();
  List<InputSplit> splits = fileInputFormat.getSplits(job);
  String[] locations = splits.get(0).getLocations();
  Assert.assertEquals(2, locations.length);
  SplitLocationInfo[] locationInfo = splits.get(0).getLocationInfo();
  Assert.assertEquals(2, locationInfo.length);
  SplitLocationInfo localhostInfo = locations[0].equals("localhost") ?
      locationInfo[0] : locationInfo[1];
  SplitLocationInfo otherhostInfo = locations[0].equals("otherhost") ?
      locationInfo[0] : locationInfo[1];
  Assert.assertTrue(localhostInfo.isOnDisk());
  Assert.assertTrue(localhostInfo.isInMemory());
  Assert.assertTrue(otherhostInfo.isOnDisk());
  Assert.assertFalse(otherhostInfo.isInMemory());
}
 
Example 4
Source Project: Hadoop-BAM   Source File: TestCRAMInputFormat.java    License: MIT License
@Test
public void testReader() throws Exception {
  int expectedCount = 0;
  SamReader samReader = SamReaderFactory.makeDefault()
      .referenceSequence(new File(URI.create(reference))).open(new File(input));
  for (SAMRecord r : samReader) {
    expectedCount++;
  }

  AnySAMInputFormat inputFormat = new AnySAMInputFormat();
  List<InputSplit> splits = inputFormat.getSplits(jobContext);
  assertEquals(1, splits.size());
  RecordReader<LongWritable, SAMRecordWritable> reader = inputFormat
      .createRecordReader(splits.get(0), taskAttemptContext);
  reader.initialize(splits.get(0), taskAttemptContext);

  int actualCount = 0;
  while (reader.nextKeyValue()) {
    actualCount++;
  }

  assertEquals(expectedCount, actualCount);
}
 
Example 5
Source Project: tinkerpop   Source File: HadoopElementIterator.java    License: Apache License 2.0
public HadoopElementIterator(final HadoopGraph graph) {
    try {
        this.graph = graph;
        final Configuration configuration = ConfUtil.makeHadoopConfiguration(this.graph.configuration());
        final InputFormat<NullWritable, VertexWritable> inputFormat = ConfUtil.getReaderAsInputFormat(configuration);
        if (inputFormat instanceof FileInputFormat) {
            final Storage storage = FileSystemStorage.open(configuration);
            if (!this.graph.configuration().containsKey(Constants.GREMLIN_HADOOP_INPUT_LOCATION))
                return; // there is no input location and thus, no data (empty graph)
            if (!Constants.getSearchGraphLocation(this.graph.configuration().getInputLocation(), storage).isPresent())
                return; // there is no data at the input location (empty graph)
            configuration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, Constants.getSearchGraphLocation(this.graph.configuration().getInputLocation(), storage).get());
        }
        final List<InputSplit> splits = inputFormat.getSplits(new JobContextImpl(configuration, new JobID(UUID.randomUUID().toString(), 1)));
        for (final InputSplit split : splits) {
            this.readers.add(inputFormat.createRecordReader(split, new TaskAttemptContextImpl(configuration, new TaskAttemptID())));
        }
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}
 
Example 6
public SMRecordReaderImpl getRecordReader(InputSplit split, Configuration config) throws IOException,
        InterruptedException {
    config.addResource(conf);
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "getRecordReader with table=%s, inputTable=%s," +
                "conglomerate=%s",
                table,
                config.get(TableInputFormat.INPUT_TABLE),
                config.get(MRConstants.SPLICE_INPUT_CONGLOMERATE));
    rr = new SMRecordReaderImpl(conf);
    if(table == null){
        TableName tableInfo = TableName.valueOf(config.get(TableInputFormat.INPUT_TABLE));
        PartitionFactory tableFactory=SIDriver.driver().getTableFactory();
        table = ((ClientPartition)tableFactory.getTable(tableInfo)).unwrapDelegate();
    }
    rr.setHTable(table);
    if (LOG.isDebugEnabled())
        SpliceLogUtils.debug(LOG, "returning record reader");
    return rr;
}
 
Example 7
Source Project: components   Source File: CSVFileInputFormat.java    License: Apache License 2.0
@Override
public CSVFileRecordReader createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
  String delimiter = context.getConfiguration().get(TALEND_ROW_DELIMITED);
  String encoding = context.getConfiguration().get(TALEND_ENCODING);

  String textEnclosure = context.getConfiguration().get(TALEND_TEXT_ENCLOSURE);
  String escapeChar = context.getConfiguration().get(TALEND_ESCAPE);

  Character te = null;
  Character ec = null;

  if (textEnclosure != null && !textEnclosure.isEmpty()) {
    te = textEnclosure.charAt(0);
  }

  if (escapeChar != null && !escapeChar.isEmpty()) {
    ec = escapeChar.charAt(0);
  }

  return createRecordReader(delimiter, encoding, te, ec);
}
 
Example 8
Source Project: marklogic-contentpump   Source File: DelimitedTextReader.java    License: Apache License 2.0
protected void initParser(InputSplit inSplit) throws IOException,
    InterruptedException {
    fileIn = openFile(inSplit, true);
    if (fileIn == null) {
        return;
    }
    instream = new InputStreamReader(fileIn, encoding);

    bytesRead = 0;
    fileLen = inSplit.getLength();
    if (uriName == null) {
        generateId = conf.getBoolean(CONF_INPUT_GENERATE_URI, false);
        if (generateId) {
            idGen = new IdGenerator(file.toUri().getPath() + "-"
                + ((FileSplit) inSplit).getStart());
        } else {
            uriId = 0;
        }
    }
    parser = new CSVParser(instream,
        CSVParserFormatter.getFormat(delimiter, encapsulator, true, true));
    parserIterator = parser.iterator();
}
 
Example 9
Source Project: hadoop   Source File: DistSum.java    License: Apache License 2.0
/** @return a list containing a single split of summation */
@Override
public List<InputSplit> getSplits(JobContext context) {
  //read sigma from conf
  final Configuration conf = context.getConfiguration();
  final Summation sigma = SummationWritable.read(DistSum.class, conf); 
  
  //create splits
  final List<InputSplit> splits = new ArrayList<InputSplit>(1);
  splits.add(new SummationSplit(sigma));
  return splits;
}
 
Example 10
@Override
public RecordReader<BytesWritable,BytesWritable> createRecordReader(InputSplit split, TaskAttemptContext ctx)  throws IOException {
	/** Create reader **/
	try {
		return new BitcoinRawBlockRecordReader(ctx.getConfiguration());
	} catch (HadoopCryptoLedgerConfigurationException e) {
		// log the configuration problem; the method falls back to returning null
		LOG.error(e);
	}
	return null;
}
 
Example 11
Source Project: hadoop   Source File: BooleanSplitter.java    License: Apache License 2.0
public List<InputSplit> split(Configuration conf, ResultSet results, String colName)
    throws SQLException {

  List<InputSplit> splits = new ArrayList<InputSplit>();

  if (results.getString(1) == null && results.getString(2) == null) {
    // Range is null to null. Return a null split accordingly.
    splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
        colName + " IS NULL", colName + " IS NULL"));
    return splits;
  }

  boolean minVal = results.getBoolean(1);
  boolean maxVal = results.getBoolean(2);

  // Use one or two splits.
  if (!minVal) {
    splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
        colName + " = FALSE", colName + " = FALSE"));
  }

  if (maxVal) {
    splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
        colName + " = TRUE", colName + " = TRUE"));
  }

  if (results.getString(1) == null || results.getString(2) == null) {
    // Include a null value.
    splits.add(new DataDrivenDBInputFormat.DataDrivenDBInputSplit(
        colName + " IS NULL", colName + " IS NULL"));
  }

  return splits;
}
 
Example 12
@Override
public void initialize(InputSplit inSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {
    Path file = ((FileSplit)inSplit).getPath();
    FileSystem fs = file.getFileSystem(context.getConfiguration());
    FSDataInputStream fileIn = fs.open(file);
    zipIn = new ZipInputStream(fileIn);
}
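
Example 12 only opens the archive; the record-by-record iteration normally happens in nextKeyValue(). The sketch below shows the usual java.util.zip pattern and is not the project's actual reader: the currentKey and currentValue fields are assumptions, and Commons IO's IOUtils.toByteArray is used to drain each entry.

// Sketch of a matching nextKeyValue(): one zip entry per record.
public boolean nextKeyValue() throws IOException {
  ZipEntry entry;
  while ((entry = zipIn.getNextEntry()) != null) {
    if (entry.isDirectory()) {
      continue;                                   // skip directory entries
    }
    currentKey = new Text(entry.getName());                         // entry name as key
    currentValue = new BytesWritable(IOUtils.toByteArray(zipIn));   // entry bytes as value
    return true;
  }
  return false;                                   // end of archive
}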
 
Example 13
Source Project: big-c   Source File: CompositeRecordReader.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
public void initialize(InputSplit split, TaskAttemptContext context) 
    throws IOException, InterruptedException {
  if (kids != null) {
    for (int i = 0; i < kids.length; ++i) {
      kids[i].initialize(((CompositeInputSplit)split).get(i), context);
      if (kids[i].key() == null) {
        continue;
      }
      
      // get keyclass
      if (keyclass == null) {
        keyclass = kids[i].createKey().getClass().
          asSubclass(WritableComparable.class);
      }
      // create priority queue
      if (null == q) {
        cmp = WritableComparator.get(keyclass, conf);
        q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
              new Comparator<ComposableRecordReader<K,?>>() {
                public int compare(ComposableRecordReader<K,?> o1,
                                   ComposableRecordReader<K,?> o2) {
                  return cmp.compare(o1.key(), o2.key());
                }
              });
      }
      // Explicit check for key class agreement
      if (!keyclass.equals(kids[i].key().getClass())) {
        throw new ClassCastException("Child key classes fail to agree");
      }
      
      // add the kid to priority queue if it has any elements
      if (kids[i].hasNext()) {
        q.add(kids[i]);
      }
    }
  }
}
 
Example 14
Source Project: datawave   Source File: WikipediaTestBed.java    License: Apache License 2.0
protected InputSplit getSplit(String file) throws URISyntaxException, MalformedURLException {
    URL data = WikipediaRecordReaderTest.class.getResource(file);
    if (data == null) {
        File fileObj = new File(file);
        if (fileObj.exists()) {
            data = fileObj.toURI().toURL();
        }
    }
    assertNotNull("Did not find test resource", data);
    
    File dataFile = new File(data.toURI());
    Path p = new Path(dataFile.toURI().toString());
    return new FileSplit(p, 0, dataFile.length(), null);
}
 
Example 15
Source Project: geowave   Source File: BaseMapReduceDataStore.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(
    final CommonQueryOptions commonOptions,
    final DataTypeQueryOptions<?> typeOptions,
    final IndexQueryOptions indexOptions,
    final QueryConstraints constraints,
    final TransientAdapterStore adapterStore,
    final AdapterIndexMappingStore aimStore,
    final DataStatisticsStore statsStore,
    final InternalAdapterStore internalAdapterStore,
    final IndexStore indexStore,
    final JobContext context,
    final Integer minSplits,
    final Integer maxSplits) throws IOException, InterruptedException {
  return splitsProvider.getSplits(
      baseOperations,
      commonOptions,
      typeOptions,
      indexOptions,
      constraints,
      adapterStore,
      statsStore,
      internalAdapterStore,
      indexStore,
      indexMappingStore,
      context,
      minSplits,
      maxSplits);
}
 
Example 16
Source Project: hadoop   Source File: TestSplitters.java    License: Apache License 2.0
@Test(timeout=2000)
public void testBooleanSplitter() throws Exception{
  BooleanSplitter splitter = new BooleanSplitter();
  ResultSet result = mock(ResultSet.class);
  when(result.getString(1)).thenReturn("result1");
  
  List<InputSplit> splits=splitter.split(configuration, result, "column");
  assertSplits(new String[] {"column = FALSE column = FALSE",
      "column IS NULL column IS NULL"}, splits);
  
  when(result.getString(1)).thenReturn("result1");
  when(result.getString(2)).thenReturn("result2");
  when(result.getBoolean(1)).thenReturn(true);
  when(result.getBoolean(2)).thenReturn(false);

  splits=splitter.split(configuration, result, "column");
  assertEquals(0, splits.size());

  when(result.getString(1)).thenReturn("result1");
  when(result.getString(2)).thenReturn("result2");
  when(result.getBoolean(1)).thenReturn(false);
  when(result.getBoolean(2)).thenReturn(true);

  splits = splitter.split(configuration, result, "column");
  assertSplits(new String[] {
      "column = FALSE column = FALSE", ".*column = TRUE"}, splits);
}
 
Example 17
Source Project: datacollector   Source File: SimpleTestInputFormat.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
  Configuration conf = jobContext.getConfiguration();

  if(conf.getBoolean(THROW_EXCEPTION, false)) {
    throw new IOException("Throwing exception as instructed, failure in bootstraping MR job.");
  }

  String fileLocation = conf.get(FILE_LOCATION);
  if(fileLocation != null) {
    FileUtils.writeStringToFile(new File(fileLocation), conf.get(FILE_VALUE));
  }

  return Collections.emptyList();
}
 
Example 18
Source Project: hadoop   Source File: JobSplitWriter.java    License: Apache License 2.0
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
 
Example 19
Source Project: big-c   Source File: JobSplitWriter.java    License: Apache License 2.0
public static void createSplitFiles(Path jobSubmitDir, 
    Configuration conf, FileSystem fs, 
    org.apache.hadoop.mapred.InputSplit[] splits) 
throws IOException {
  FSDataOutputStream out = createFile(fs, 
      JobSubmissionFiles.getJobSplitFile(jobSubmitDir), conf);
  SplitMetaInfo[] info = writeOldSplits(splits, out, conf);
  out.close();
  writeJobSplitMetaInfo(fs,JobSubmissionFiles.getJobSplitMetaFile(jobSubmitDir), 
      new FsPermission(JobSubmissionFiles.JOB_FILE_PERMISSION), splitVersion,
      info);
}
 
Example 20
Source Project: incubator-tez   Source File: MapContextImpl.java    License: Apache License 2.0
public MapContextImpl(Configuration conf, TaskAttemptID taskid,
                      RecordReader<KEYIN,VALUEIN> reader,
                      RecordWriter<KEYOUT,VALUEOUT> writer,
                      OutputCommitter committer,
                      TezTaskContext context,
                      InputSplit split, Reporter reporter) {
  super(conf, taskid, writer, committer, context, reporter);
  this.reader = reader;
  this.split = split;
}
 
Example 21
@Override
public List<InputSplit> getSplits(JobContext job) throws IOException {
  List<InputSplit> splits = new ArrayList<InputSplit>();
  Configuration conf = job.getConfiguration();
  String dsName
      = conf.get(MainframeConfiguration.MAINFRAME_INPUT_DATASET_NAME);
  LOG.info("Datasets to transfer from: " + dsName);
  List<String> datasets = retrieveDatasets(dsName, conf);
  if (datasets.isEmpty()) {
    throw new IOException ("No sequential datasets retrieved from " + dsName);
  } else {
    int count = datasets.size();
    int chunks = Math.min(count, ConfigurationHelper.getJobNumMaps(job));
    for (int i = 0; i < chunks; i++) {
      splits.add(new MainframeDatasetInputSplit());
    }

    int j = 0;
    while(j < count) {
      for (InputSplit sp : splits) {
        if (j == count) {
          break;
        }
        ((MainframeDatasetInputSplit)sp).addDataset(datasets.get(j));
        j++;
      }
    }
  }
  return splits;
}
 
Example 22
@Override
public void initialize(InputSplit genericSplit, TaskAttemptContext context)
    throws IOException, InterruptedException {
  FileSplit split = (FileSplit) genericSplit;
  Configuration conf = context.getConfiguration();
  SeekableInput in = new FsInput(split.getPath(), conf);
  DatumReader<T> datumReader = new GenericDatumReader<T>();
  this.reader = DataFileReader.openReader(in, datumReader);
  reader.sync(split.getStart());                    // sync to start
  this.start = reader.tell();
  this.end = split.getStart() + split.getLength();
}
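
The initialize() above seeks to the first Avro sync marker at or after the split's start and remembers the split's end. The matching nextKeyValue() normally stops once the reader moves past that end, so neighbouring splits never return the same records. A sketch of that pattern (the currentRecord field is an assumption):

public boolean nextKeyValue() throws IOException {
  if (!reader.hasNext() || reader.pastSync(end)) {
    return false;                                 // past this split's portion of the file
  }
  currentRecord = reader.next(currentRecord);     // reuse the previous datum where possible
  return true;
}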
 
Example 23
Source Project: hbase   Source File: NMapInputFormat.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext context) {
  int count = getNumMapTasks(context.getConfiguration());
  List<InputSplit> splits = new ArrayList<>(count);
  for (int i = 0; i < count; i++) {
    splits.add(new NullInputSplit());
  }
  return splits;
}
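
NMapInputFormat exists purely to control how many map tasks run, so its splits carry no data at all. A split like that can be almost empty; a sketch of what such a NullInputSplit can look like (the real HBase class may differ in detail):

// A data-less split: zero length, no location hints, nothing to serialize.
public static class NullInputSplit extends InputSplit implements Writable {
  @Override
  public long getLength() {
    return 0;
  }

  @Override
  public String[] getLocations() {
    return new String[0];
  }

  @Override
  public void write(DataOutput out) throws IOException {
    // nothing to write
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    // nothing to read
  }
}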
 
Example 24
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException,
    InterruptedException {
  int targetNumTasks = ConfigurationHelper.getJobNumMaps(context);
  List<InputSplit> splits = new ArrayList<InputSplit>(targetNumTasks);
  for (int i = 0; i < targetNumTasks; ++i) {
    splits.add(new NetezzaExternalTableInputSplit(i));
  }
  return splits;
}
 
Example 25
Source Project: ignite   Source File: HadoopSortingTest.java    License: Apache License 2.0
/** {@inheritDoc} */
@Override public List<InputSplit> getSplits(JobContext ctx) throws IOException, InterruptedException {
    List<InputSplit> res = new ArrayList<>();

    FakeSplit split = new FakeSplit(20);

    for (int i = 0; i < 10; i++)
        res.add(split);

    return res;
}
 
Example 26
Source Project: phoenix   Source File: PhoenixInputFormat.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {  
    final Configuration configuration = context.getConfiguration();
    final QueryPlan queryPlan = getQueryPlan(context,configuration);
    final List<KeyRange> allSplits = queryPlan.getSplits();
    final List<InputSplit> splits = generateSplits(queryPlan,allSplits);
    return splits;
}
 
Example 27
Source Project: kylin   Source File: MergeDictionaryJob.java    License: Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
    int numMapTasks = jobContext.getConfiguration().getInt("num.map.tasks", 0);
    List<InputSplit> inputSplits = Lists.newArrayListWithCapacity(numMapTasks);

    for (int i = 0; i < numMapTasks; i++) {
        inputSplits.add(new IntInputSplit(i));
    }

    return inputSplits;
}
 
Example 28
Source Project: incubator-tez   Source File: TeraInputFormat.java    License: Apache License 2.0
public void initialize(InputSplit split, TaskAttemptContext context) 
    throws IOException, InterruptedException {
  Path p = ((FileSplit)split).getPath();
  FileSystem fs = p.getFileSystem(context.getConfiguration());
  in = fs.open(p);
  long start = ((FileSplit)split).getStart();
  // find the offset to start at a record boundary
  offset = (RECORD_LENGTH - (start % RECORD_LENGTH)) % RECORD_LENGTH;
  in.seek(start + offset);
  length = ((FileSplit)split).getLength();
}
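
The offset computation above advances the reader from the raw split boundary to the next whole record, so each record is processed by exactly one map task. A quick illustration of the arithmetic, assuming 100-byte records as in TeraSort:

long recordLength = 100;                // assumed RECORD_LENGTH
long start = 250;                       // split begins in the middle of a record
long offset = (recordLength - (start % recordLength)) % recordLength;
System.out.println(offset);             // 50: reading starts at byte 300, a record boundary
System.out.println((recordLength - (0 % recordLength)) % recordLength);  // 0: already aligned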
 
Example 29
@Override
public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    reader.initialize(inputSplit, taskAttemptContext);

    Configuration conf = taskAttemptContext.getConfiguration();
    if (conf.get(Constants.GREMLIN_HADOOP_GRAPH_FILTER, null) != null) {
        graphFilter = VertexProgramHelper.deserialize(ConfUtil.makeApacheConfiguration(conf),
                                                      Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    }
}
 
Example 30
Source Project: datawave   Source File: AbstractEventRecordReader.java    License: Apache License 2.0
@Override
public void initialize(final InputSplit genericSplit, final TaskAttemptContext context) throws IOException {
    initializeEvent(context.getConfiguration());
    
    if (genericSplit instanceof FileSplit) {
        final Path p = ((FileSplit) genericSplit).getPath();
        final FileSystem sys = p.getFileSystem(context.getConfiguration());
        
        rawFileName = p.toString();
        rawFileTimeStamp = sys.getFileStatus(p).getModificationTime();
    }
}