Java Code Examples for org.apache.hadoop.io.SequenceFile#createWriter()

The following examples show how to use org.apache.hadoop.io.SequenceFile#createWriter(). Each example comes from an open-source project; the source file, project, and license are noted above the code.
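Before the project examples, here is a minimal, self-contained sketch (not taken from any project below; the class name and output path are illustrative) that writes a few IntWritable/Text records with the non-deprecated, Writer.Option-based overload of createWriter(). The FileSystem-based overloads used in many of the examples below are deprecated in recent Hadoop releases, as the inline comments in Examples 2 and 8 note, but still work.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class MinimalSequenceFileWriteExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]); // e.g. file:///tmp/example.seq

        SequenceFile.Writer writer = null;
        try {
            // Options-based overload: createWriter(Configuration, Writer.Option...)
            writer = SequenceFile.createWriter(conf,
                    SequenceFile.Writer.file(path),
                    SequenceFile.Writer.keyClass(IntWritable.class),
                    SequenceFile.Writer.valueClass(Text.class),
                    SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE));

            IntWritable key = new IntWritable();
            Text value = new Text();
            for (int i = 0; i < 3; i++) {
                key.set(i);
                value.set("record-" + i);
                writer.append(key, value);
            }
        } finally {
            IOUtils.closeStream(writer); // safe even if the writer was never opened
        }
    }
}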
Example 1
Source File: SimpleSequenceFileBytesCreator.java    From marklogic-contentpump with Apache License 2.0
public static void main(String args[]) throws IOException {
    System.out.println("Sequence File Creator");
    String uri = args[0]; // output sequence file name

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(uri), conf);
    Path path = new Path(uri);
    SequenceFile.Writer writer = null;
    SimpleSequenceFileKey key = new SimpleSequenceFileKey();
    SimpleSequenceFileValue<BytesWritable> value = new SimpleSequenceFileValue<BytesWritable>();
    try {
        BytesWritable bw = new BytesWritable();
        byte byteArray[] = {2,3,4};
        bw.set(byteArray, 0, byteArray.length);
        
        writer = SequenceFile.createWriter(fs, conf, path, key.getClass(),
            value.getClass());
        
        key.setDocumentURI(new DocumentURI("ABC"));
        value.setValue(bw);
        writer.append(key, value);
        System.err.println(key.getDocumentURI().getUri() + value);
    } finally {
        IOUtils.closeStream(writer);
    }
}
 
Example 2
Source File: Hdfs.java    From pxf with Apache License 2.0
@Override
public void writeSequenceFile(Object[] writableData, String pathToFile)
        throws IOException {
    ReportUtils.startLevel(report, getClass(),
            "Writing Sequence file from "
                    + writableData[0].getClass().getName() + " array to "
                    + pathToFile);
    IntWritable key = new IntWritable();
    Path path = getDatapath(pathToFile);

    // Even though this method is deprecated, we need to pass the correct
    // fs for multi-hadoop tests
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, config,
            path, key.getClass(), writableData[0].getClass());
    for (int i = 1; i < writableData.length; i++) {
        writer.append(key, writableData[i]);
    }
    writer.close();
    ReportUtils.stopLevel(report);
}
 
Example 3
Source File: SequenceFileUtil.java    From alchemy with Apache License 2.0
public static void writeSequenceFile(String path) throws Exception{
	Writer.Option filePath = Writer.file(new Path(path));
	Writer.Option keyClass = Writer.keyClass(IntWritable.class);
	Writer.Option valueClass = Writer.valueClass(Text.class);
	Writer.Option compression = Writer.compression(CompressionType.NONE);
	Writer writer = SequenceFile.createWriter(configuration, filePath, keyClass, valueClass, compression);
	IntWritable key = new IntWritable();
	Text value = new Text("");
	for(int i=0;i<100;i++){
		key.set(i);
		value.set("value_"+i);
		writer.append(key, value);
	}
	writer.hflush();
	writer.close();
}
 
Example 4
Source File: GenWriterThread.java    From RDFS with Apache License 2.0
/**
 * This is used for verification.
 * Each mapper writes one control file. The control file contains only the
 * base directory written by this mapper and the path of the checksum file,
 * so that a Read mapper can later scan the files under the base directory
 * and verify their checksums against the information in the checksum file.
 * @param fs file system to write the control file to
 * @param outputPath base directory of mapper
 * @param checksumFile location of checksum file
 * @param name name of control file
 * @throws IOException
 */
private void writeControlFile(FileSystem fs, Path outputPath, 
    Path checksumFile, String name) throws IOException {
  SequenceFile.Writer write = null;
  try {
    Path parentDir = new Path(rtc.input, "filelists");
    if (!fs.exists(parentDir)) {
      fs.mkdirs(parentDir);
    }
    Path controlFile = new Path(parentDir, name);
    write = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
        Text.class, Text.class, CompressionType.NONE);
    write.append(new Text(outputPath.toString()), 
        new Text(checksumFile.toString()));
  } finally {
    if (write != null)
      write.close();
    write = null;
  }
}
 
Example 5
Source File: MetaDataWorker.java    From compiler with Apache License 2.0
public void openWriters() {
	long time = System.currentTimeMillis();
	String suffix = id + "-" + time + ".seq";
	while (true) {
		try {
			projectWriter = SequenceFile.createWriter(fileSystem, conf, new Path(base + "/project/" + suffix),
					Text.class, BytesWritable.class, CompressionType.BLOCK);
			astWriter = SequenceFile.createWriter(fileSystem, conf, new Path(base + "/ast/" + suffix),
					LongWritable.class, BytesWritable.class, CompressionType.BLOCK);
			commitWriter = SequenceFile.createWriter(fileSystem, conf, new Path(base + "/commit/" + suffix),
					LongWritable.class, BytesWritable.class, CompressionType.BLOCK);
			contentWriter = SequenceFile.createWriter(fileSystem, conf, new Path(base + "/source/" + suffix),
					LongWritable.class, BytesWritable.class, CompressionType.BLOCK);
			break;
		} catch (Throwable t) {
			t.printStackTrace();
			try {
				Thread.sleep(1000);
			} catch (InterruptedException e) {
			}
		}
	}
}
 
Example 6
Source File: TestTotalOrderPartitioner.java    From big-c with Apache License 2.0
private static <T extends WritableComparable<?>> Path writePartitionFile(
    String testname, Configuration conf, T[] splits) throws IOException {
  final FileSystem fs = FileSystem.getLocal(conf);
  final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")
                               ).makeQualified(fs);
  Path p = new Path(testdir, testname + "/_partition.lst");
  TotalOrderPartitioner.setPartitionFile(conf, p);
  conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
  SequenceFile.Writer w = null;
  try {
    w = SequenceFile.createWriter(fs, conf, p,
        splits[0].getClass(), NullWritable.class,
        SequenceFile.CompressionType.NONE);
    for (int i = 0; i < splits.length; ++i) {
      w.append(splits[i], NullWritable.get());
    }
  } finally {
    if (null != w)
      w.close();
  }
  return p;
}
 
Example 7
Source File: InputSampler.java    From hadoop-gpu with Apache License 2.0
/**
 * Write a partition file for the given job, using the Sampler provided.
 * Queries the sampler for a sample keyset, sorts by the output key
 * comparator, selects the keys for each rank, and writes to the destination
 * returned from
 * {@link org.apache.hadoop.mapred.lib.TotalOrderPartitioner#getPartitionFile}.
 */
@SuppressWarnings("unchecked") // getInputFormat, getOutputKeyComparator
public static <K,V> void writePartitionFile(JobConf job,
    Sampler<K,V> sampler) throws IOException {
  final InputFormat<K,V> inf = (InputFormat<K,V>) job.getInputFormat();
  int numPartitions = job.getNumReduceTasks();
  K[] samples = sampler.getSample(inf, job);
  LOG.info("Using " + samples.length + " samples");
  RawComparator<K> comparator =
    (RawComparator<K>) job.getOutputKeyComparator();
  Arrays.sort(samples, comparator);
  Path dst = new Path(TotalOrderPartitioner.getPartitionFile(job));
  FileSystem fs = dst.getFileSystem(job);
  if (fs.exists(dst)) {
    fs.delete(dst, false);
  }
  SequenceFile.Writer writer = SequenceFile.createWriter(fs, job, dst,
      job.getMapOutputKeyClass(), NullWritable.class);
  NullWritable nullValue = NullWritable.get();
  float stepSize = samples.length / (float) numPartitions;
  int last = -1;
  for(int i = 1; i < numPartitions; ++i) {
    int k = Math.round(stepSize * i);
    while (last >= k && comparator.compare(samples[last], samples[k]) == 0) {
      ++k;
    }
    writer.append(samples[k], nullValue);
    last = k;
  }
  writer.close();
}
 
Example 8
Source File: SequenceFileWriter.java    From Flink-CEPplus with Apache License 2.0
@Override
public void open(FileSystem fs, Path path) throws IOException {
	super.open(fs, path);
	if (keyClass == null) {
		throw new IllegalStateException("Key Class has not been initialized.");
	}
	if (valueClass == null) {
		throw new IllegalStateException("Value Class has not been initialized.");
	}

	CompressionCodec codec = null;

	Configuration conf = fs.getConf();

	if (!compressionCodecName.equals("None")) {
		CompressionCodecFactory codecFactory = new CompressionCodecFactory(conf);
		codec = codecFactory.getCodecByName(compressionCodecName);
		if (codec == null) {
			throw new RuntimeException("Codec " + compressionCodecName + " not found.");
		}
	}

	// the non-deprecated constructor syntax is only available in recent hadoop versions...
	writer = SequenceFile.createWriter(conf,
			getStream(),
			keyClass,
			valueClass,
			compressionType,
			codec);
}
 
Example 9
Source File: TestFileSystem.java    From hadoop-gpu with Apache License 2.0
public static void createControlFile(FileSystem fs,
                                     long megaBytes, int numFiles,
                                     long seed) throws Exception {

  LOG.info("creating control file: "+megaBytes+" bytes, "+numFiles+" files");

  Path controlFile = new Path(CONTROL_DIR, "files");
  fs.delete(controlFile, true);
  Random random = new Random(seed);

  SequenceFile.Writer writer =
    SequenceFile.createWriter(fs, conf, controlFile, 
                              UTF8.class, LongWritable.class, CompressionType.NONE);

  long totalSize = 0;
  long maxSize = ((megaBytes / numFiles) * 2) + 1;
  try {
    while (totalSize < megaBytes) {
      UTF8 name = new UTF8(Long.toString(random.nextLong()));

      long size = random.nextLong();
      if (size < 0)
        size = -size;
      size = size % maxSize;

      //LOG.info(" adding: name="+name+" size="+size);

      writer.append(name, new LongWritable(size));

      totalSize += size;
    }
  } finally {
    writer.close();
  }
  LOG.info("created control file for: "+totalSize+" bytes");
}
 
Example 10
Source File: TestDFSIO.java    From hadoop with Apache License 2.0
@SuppressWarnings("deprecation")
private void createControlFile(FileSystem fs,
                                long nrBytes, // in bytes
                                int nrFiles
                              ) throws IOException {
  LOG.info("creating control file: "+nrBytes+" bytes, "+nrFiles+" files");

  Path controlDir = getControlDir(config);
  fs.delete(controlDir, true);

  for(int i=0; i < nrFiles; i++) {
    String name = getFileName(i);
    Path controlFile = new Path(controlDir, "in_file_" + name);
    SequenceFile.Writer writer = null;
    try {
      writer = SequenceFile.createWriter(fs, config, controlFile,
                                         Text.class, LongWritable.class,
                                         CompressionType.NONE);
      writer.append(new Text(name), new LongWritable(nrBytes));
    } catch(Exception e) {
      throw new IOException(e.getLocalizedMessage());
    } finally {
      if (writer != null)
        writer.close();
      writer = null;
    }
  }
  LOG.info("created control files for: "+nrFiles+" files");
}
 
Example 11
Source File: SequenceFileBolt.java    From storm-hdfs with Apache License 2.0
Path createOutputFile() throws IOException {
    Path p = new Path(this.fsUrl + this.fileNameFormat.getPath(), this.fileNameFormat.getName(this.rotation, System.currentTimeMillis()));
    this.writer = SequenceFile.createWriter(
            this.hdfsConfig,
            SequenceFile.Writer.file(p),
            SequenceFile.Writer.keyClass(this.format.keyClass()),
            SequenceFile.Writer.valueClass(this.format.valueClass()),
            SequenceFile.Writer.compression(this.compressionType, this.codecFactory.getCodecByName(this.compressionCodec))
    );
    return p;
}
 
Example 12
Source File: SequenceFileOutputFormat.java    From RDFS with Apache License 2.0
public RecordWriter<K, V> getRecordWriter(
                                        FileSystem ignored, JobConf job,
                                        String name, Progressable progress)
  throws IOException {
  // get the path of the temporary output file 
  Path file = FileOutputFormat.getTaskOutputPath(job, name);
  
  FileSystem fs = file.getFileSystem(job);
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(job)) {
    // find the kind of compression to do
    compressionType = getOutputCompressionType(job);

    // find the right codec
    Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, DefaultCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job);
  }
  final SequenceFile.Writer out = 
    SequenceFile.createWriter(fs, job, file,
                              job.getOutputKeyClass(),
                              job.getOutputValueClass(),
                              compressionType,
                              codec,
                              progress);

  return new RecordWriter<K, V>() {

      public void write(K key, V value)
        throws IOException {

        out.append(key, value);
      }

      public void close(Reporter reporter) throws IOException { out.close();}
    };
}
 
Example 13
Source File: MapReduceBackupCopyJob.java    From hbase with Apache License 2.0
private SequenceFile.Writer getWriter(Path pathToListFile) throws IOException {
  FileSystem fs = pathToListFile.getFileSystem(conf);
  fs.delete(pathToListFile, false);
  return SequenceFile.createWriter(conf, SequenceFile.Writer.file(pathToListFile),
    SequenceFile.Writer.keyClass(Text.class),
    SequenceFile.Writer.valueClass(CopyListingFileStatus.class),
    SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE));
}
 
Example 14
Source File: MapTask.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
private void writeSkippedRec(K key, V value) throws IOException{
  if(skipWriter==null) {
    Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
    Path skipFile = new Path(skipDir, getTaskID().toString());
    skipWriter = 
      SequenceFile.createWriter(
          skipFile.getFileSystem(conf), conf, skipFile,
          (Class<K>) createKey().getClass(),
          (Class<V>) createValue().getClass(), 
          CompressionType.BLOCK, getTaskReporter());
  }
  skipWriter.append(key, value);
}
 
Example 15
Source File: FileWriterBase.java    From suro with Apache License 2.0
/**
 * Create a new sequence file, block-compressed if a codec has been configured.
 *
 * @param newPath path of the sequence file to create
 * @return a writer for the new sequence file
 * @throws java.io.IOException
 */
public SequenceFile.Writer createSequenceFile(String newPath) throws IOException {
    if (codec != null) {
        return SequenceFile.createWriter(
                fs, conf, new Path(newPath),
                Text.class, MessageWritable.class,
                SequenceFile.CompressionType.BLOCK, codec);
    } else {
        return SequenceFile.createWriter(
                fs, conf, new Path(newPath),
                Text.class, MessageWritable.class,
                SequenceFile.CompressionType.NONE, codec);
    }
}
 
Example 16
Source File: MapTask.java    From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
private void writeSkippedRec(K key, V value) throws IOException{
  if(skipWriter==null) {
    Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
    Path skipFile = new Path(skipDir, getTaskID().toString());
    skipWriter = 
      SequenceFile.createWriter(
          skipFile.getFileSystem(conf), conf, skipFile,
          (Class<K>) createKey().getClass(),
          (Class<V>) createValue().getClass(), 
          CompressionType.BLOCK, getTaskReporter());
  }
  skipWriter.append(key, value);
}
 
Example 17
Source File: PiEstimator.java    From RDFS with Apache License 2.0
/**
 * Run a map/reduce job for estimating Pi.
 *
 * @return the estimated value of Pi
 */
public static BigDecimal estimate(int numMaps, long numPoints, JobConf jobConf
    ) throws IOException {
  //setup job conf
  jobConf.setJobName(PiEstimator.class.getSimpleName());

  jobConf.setInputFormat(SequenceFileInputFormat.class);

  jobConf.setOutputKeyClass(BooleanWritable.class);
  jobConf.setOutputValueClass(LongWritable.class);
  jobConf.setOutputFormat(SequenceFileOutputFormat.class);

  jobConf.setMapperClass(PiMapper.class);
  jobConf.setNumMapTasks(numMaps);

  jobConf.setReducerClass(PiReducer.class);
  jobConf.setNumReduceTasks(1);

  // turn off speculative execution, because DFS doesn't handle
  // multiple writers to the same file.
  jobConf.setSpeculativeExecution(false);

  //setup input/output directories
  final Path inDir = new Path(TMP_DIR, "in");
  final Path outDir = new Path(TMP_DIR, "out");
  FileInputFormat.setInputPaths(jobConf, inDir);
  FileOutputFormat.setOutputPath(jobConf, outDir);

  final FileSystem fs = FileSystem.get(jobConf);
  if (fs.exists(TMP_DIR)) {
    throw new IOException("Tmp directory " + fs.makeQualified(TMP_DIR)
        + " already exists.  Please remove it first.");
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Cannot create input directory " + inDir);
  }

  try {
    //generate an input file for each map task
    for(int i=0; i < numMaps; ++i) {
      final Path file = new Path(inDir, "part"+i);
      final LongWritable offset = new LongWritable(i * numPoints);
      final LongWritable size = new LongWritable(numPoints);
      final SequenceFile.Writer writer = SequenceFile.createWriter(
          fs, jobConf, file,
          LongWritable.class, LongWritable.class, CompressionType.NONE);
      try {
        writer.append(offset, size);
      } finally {
        writer.close();
      }
      System.out.println("Wrote input for Map #"+i);
    }

    //start a map/reduce job
    System.out.println("Starting Job");
    final long startTime = System.currentTimeMillis();
    JobClient.runJob(jobConf);
    final double duration = (System.currentTimeMillis() - startTime)/1000.0;
    System.out.println("Job Finished in " + duration + " seconds");

    //read outputs
    Path inFile = new Path(outDir, "reduce-out");
    LongWritable numInside = new LongWritable();
    LongWritable numOutside = new LongWritable();
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, inFile, jobConf);
    try {
      reader.next(numInside, numOutside);
    } finally {
      reader.close();
    }

    //compute estimated value
    return BigDecimal.valueOf(4).setScale(20)
        .multiply(BigDecimal.valueOf(numInside.get()))
        .divide(BigDecimal.valueOf(numMaps))
        .divide(BigDecimal.valueOf(numPoints));
  } finally {
    fs.delete(TMP_DIR, true);
  }
}
 
Example 18
Source File: StandaloneMemoryStore.java    From warp10-platform with Apache License 2.0
public void dump(String path) throws IOException {
    
    long nano = System.nanoTime();
    int gts = 0;
    long bytes = 0L;
    
    Configuration conf = new Configuration();
        
    conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
    conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());

    BytesWritable key = new BytesWritable();
    BytesWritable value = new BytesWritable();
    
    CompressionCodec Codec = new DefaultCodec();
    SequenceFile.Writer writer = null;
    SequenceFile.Writer.Option optPath = SequenceFile.Writer.file(new Path(path));
    SequenceFile.Writer.Option optKey = SequenceFile.Writer.keyClass(key.getClass());
    SequenceFile.Writer.Option optVal = SequenceFile.Writer.valueClass(value.getClass());
    SequenceFile.Writer.Option optCom = SequenceFile.Writer.compression(CompressionType.RECORD,  Codec);
    
    writer = SequenceFile.createWriter(conf, optPath, optKey, optVal, optCom);

    TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
    
    try {
      for (Entry<BigInteger,GTSEncoder> entry: this.series.entrySet()) {
        gts++;
        Metadata metadata = this.directoryClient.getMetadataById(entry.getKey());

        GTSWrapper wrapper = new GTSWrapper();
        wrapper.setMetadata(metadata);        
        
        GTSEncoder encoder = entry.getValue();

        wrapper.setBase(encoder.getBaseTimestamp());
        wrapper.setCount(encoder.getCount());
        
        byte[] data = serializer.serialize(wrapper);
        key.set(data, 0, data.length);
        
        data = encoder.getBytes();
        value.set(data, 0, data.length);

        bytes += key.getLength() + value.getLength();
        
        writer.append(key, value);
      }
/*      
      for (Entry<BigInteger,Metadata> entry: this.metadatas.entrySet()) {
        gts++;
        byte[] data = serializer.serialize(entry.getValue());
        key.set(data, 0, data.length);
        
        GTSEncoder encoder = this.series.get(entry.getKey());
        data = encoder.getBytes();
        value.set(data, 0, data.length);

        bytes += key.getLength() + value.getLength();
        
        writer.append(key, value);
      }
*/      
    } catch (IOException ioe) {
      ioe.printStackTrace();
      throw ioe;
    } catch (Exception e) {
      e.printStackTrace();
      throw new IOException(e);
    }
    
    writer.close();

    nano = System.nanoTime() - nano;
    
    System.out.println("Dumped " + gts + " GTS (" + bytes + " bytes) in " + (nano / 1000000.0D) + " ms.");
  }
 
Example 19
Source File: ResourceOutputStreamTest.java    From HiveRunner with Apache License 2.0
private SequenceFile.Writer createSequenceFileWriter(OutputStream resourceOutputStream) throws IOException {
    return SequenceFile.createWriter(new Configuration(),
            SequenceFile.Writer.stream(new FSDataOutputStream(resourceOutputStream, null)),
            SequenceFile.Writer.keyClass(NullWritable.class),
            SequenceFile.Writer.valueClass(Text.class));
}
 
Example 20
Source File: GenericMRLoadGenerator.java    From big-c with Apache License 2.0
public int run(String [] argv) throws Exception {
  JobConf job = new JobConf(getConf());
  job.setJarByClass(GenericMRLoadGenerator.class);
  job.setMapperClass(SampleMapper.class);
  job.setReducerClass(SampleReducer.class);
  if (!parseArgs(argv, job)) {
    return -1;
  }

  if (null == FileOutputFormat.getOutputPath(job)) {
    // No output dir? No writes
    job.setOutputFormat(NullOutputFormat.class);
  }

  if (0 == FileInputFormat.getInputPaths(job).length) {
    // No input dir? Generate random data
    System.err.println("No input path; ignoring InputFormat");
    confRandom(job);
  } else if (null != job.getClass(
     org.apache.hadoop.mapreduce.GenericMRLoadGenerator.INDIRECT_INPUT_FORMAT,
     null)) {
    // specified IndirectInputFormat? Build src list
    JobClient jClient = new JobClient(job);
    Path tmpDir = new Path(jClient.getFs().getHomeDirectory(), ".staging");
    Random r = new Random();
    Path indirInputFile = new Path(tmpDir,
        Integer.toString(r.nextInt(Integer.MAX_VALUE), 36) + "_files");
    job.set(
      org.apache.hadoop.mapreduce.GenericMRLoadGenerator.INDIRECT_INPUT_FILE,
      indirInputFile.toString());
    SequenceFile.Writer writer = SequenceFile.createWriter(
        tmpDir.getFileSystem(job), job, indirInputFile,
        LongWritable.class, Text.class,
        SequenceFile.CompressionType.NONE);
    try {
      for (Path p : FileInputFormat.getInputPaths(job)) {
        FileSystem fs = p.getFileSystem(job);
        Stack<Path> pathstack = new Stack<Path>();
        pathstack.push(p);
        while (!pathstack.empty()) {
          for (FileStatus stat : fs.listStatus(pathstack.pop())) {
            if (stat.isDirectory()) {
              if (!stat.getPath().getName().startsWith("_")) {
                pathstack.push(stat.getPath());
              }
            } else {
              writer.sync();
              writer.append(new LongWritable(stat.getLen()),
                  new Text(stat.getPath().toUri().toString()));
            }
          }
        }
      }
    } finally {
      writer.close();
    }
  }

  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  JobClient.runJob(job);
  Date endTime = new Date();
  System.out.println("Job ended: " + endTime);
  System.out.println("The job took " +
                     (endTime.getTime() - startTime.getTime()) /1000 +
                     " seconds.");

  return 0;
}