org.apache.hadoop.io.SequenceFile.CompressionType Java Examples

The following examples show how to use org.apache.hadoop.io.SequenceFile.CompressionType. Each example is taken from an open-source project; the project and source file are noted in the header above the code.
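
Before looking at the individual projects, a quick orientation: CompressionType is an enum with three values, NONE (no compression), RECORD (each value is compressed on its own), and BLOCK (batches of keys and values are compressed together). The short sketch below shows one common way to pass it to SequenceFile.createWriter; the output path and key/value classes are placeholders chosen for illustration, not taken from any of the projects listed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;

public class CompressionTypeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/tmp/compression-type-sketch.seq"); // placeholder path

    // Option-based factory method; BLOCK compresses batches of records together.
    SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(file),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(IntWritable.class),
        SequenceFile.Writer.compression(CompressionType.BLOCK));
    try {
      writer.append(new Text("key"), new IntWritable(1));
    } finally {
      writer.close();
    }
  }
}
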
Example #1
Source File: TestSequenceFile.java    From big-c with Apache License 2.0
private void writeMetadataTest(FileSystem fs, int count, int seed, Path file, 
                                      CompressionType compressionType, CompressionCodec codec, SequenceFile.Metadata metadata)
  throws IOException {
  fs.delete(file, true);
  LOG.info("creating " + count + " records with metadata and with " + compressionType +
           " compression");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              RandomDatum.class, RandomDatum.class, compressionType, codec, null, metadata);
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for (int i = 0; i < count; i++) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();

    writer.append(key, value);
  }
  writer.close();
}
 
Example #2
Source File: TestBucketWriter.java    From mt-flume with Apache License 2.0
@Test
public void testSizeRoller() throws IOException, InterruptedException {
  int maxBytes = 300;
  MockHDFSWriter hdfsWriter = new MockHDFSWriter();
  BucketWriter bucketWriter = new BucketWriter(0, maxBytes, 0, 0, ctx,
      "/tmp", "file", "", ".tmp", null, null, SequenceFile.CompressionType.NONE,
      hdfsWriter, timedRollerPool, null,
      new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
      0, null, null, 30000, Executors.newSingleThreadExecutor());

  Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
  for (int i = 0; i < 1000; i++) {
    bucketWriter.append(e);
  }

  logger.info("Number of events written: {}", hdfsWriter.getEventsWritten());
  logger.info("Number of bytes written: {}", hdfsWriter.getBytesWritten());
  logger.info("Number of files opened: {}", hdfsWriter.getFilesOpened());

  Assert.assertEquals("events written", 1000, hdfsWriter.getEventsWritten());
  Assert.assertEquals("bytes written", 3000, hdfsWriter.getBytesWritten());
  Assert.assertEquals("files opened", 10, hdfsWriter.getFilesOpened());
}
 
Example #3
Source File: TestCodec.java    From hadoop with Apache License 2.0
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Example #4
Source File: GenWriterThread.java    From RDFS with Apache License 2.0
/**
 * This is used for verification.
 * Each mapper writes one control file. The control file contains only the
 * base directory written by this mapper and the path of the checksum file,
 * so that a Read mapper can scan the files under the base directory and
 * verify their checksums against the information in the checksum file.
 * @param fs file system to write the control file to
 * @param outputPath base directory written by this mapper
 * @param checksumFile location of the checksum file
 * @param name name of the control file
 * @throws IOException
 */
private void writeControlFile(FileSystem fs, Path outputPath, 
    Path checksumFile, String name) throws IOException {
  SequenceFile.Writer write = null;
  try {
    Path parentDir = new Path(rtc.input, "filelists");
    if (!fs.exists(parentDir)) {
      fs.mkdirs(parentDir);
    }
    Path controlFile = new Path(parentDir, name);
    write = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
        Text.class, Text.class, CompressionType.NONE);
    write.append(new Text(outputPath.toString()), 
        new Text(checksumFile.toString()));
  } finally {
    if (write != null)
      write.close();
    write = null;
  }
}
 
Example #5
Source File: ConfigurationBuilder.java    From mrgeo with Apache License 2.0
public Configuration build()
{
  when(configuration.getInt(ZOOM_LEVEL, 1)).thenReturn(zoomLevel);
  when(configuration.getInt(TILE_SIZE, MrGeoConstants.MRGEO_MRS_TILESIZE_DEFAULT_INT)).thenReturn(tileSize);
  when(configuration.get(BOUNDS)).thenReturn(boundsString);
  when(configuration.getBoolean(FILE_OUTPT_FORMAT_COMPRESS, false)).thenReturn(compressOutput);
  when(configuration.get(FILE_OUTPUT_COMPRESSION_TYPE, CompressionType.RECORD.toString()))
      .thenReturn(outputCompressionType);
  when(configuration.get(FILE_OUTPUT_COMPRESSION_CODEC)).thenReturn(outputCompressionCodec);
  when(configuration.get(FILE_OUTPUT_PATH)).thenReturn(outputFilePath);

//        when(configuration.getClassByName(anyString())).thenAnswer(new Answer<Class>() {
//
//            @Override
//            public Class answer(InvocationOnMock invocationOnMock) throws Throwable {
//                return Class.forName(invocationOnMock.getArguments()[0].toString());
//            }
//        });

  return configuration;
}
 
Example #6
Source File: GenWriterThread.java    From RDFS with Apache License 2.0
/**
 * Each mapper writes one checksum file.
 * The checksum file contains N pairs, where N is the number of threads.
 * Each pair has two entries: outputPath and checksum.
 * outputPath is the directory of files written by the thread;
 * checksum is the CRC checksum of all files under that directory.
 * @param fs file system to write the checksum file to
 * @param name checksum file name
 * @param threads array of writer threads
 * @return checksum file path
 * @throws IOException
 */
private Path writeChecksumFile(FileSystem fs, String name, 
    GenThread[] threads) throws IOException {
  Path checksumFile = new Path(rtc.output_dir, name + ".checksum");
  SequenceFile.Writer write = null;
  write = SequenceFile.createWriter(fs, fs.getConf(), checksumFile,
      Text.class, Text.class, CompressionType.NONE);
  try {
    for (GenThread rawThread: threads) {
      GenWriterThread thread = (GenWriterThread)rawThread;
      write.append(new Text(thread.outputPath.toString()), 
          new Text(Long.toString(thread.dc.getDirectoryChecksum())));
    } 
  } finally {
    if (write != null)
      write.close();
    write = null;
  }
  return checksumFile;
}
 
Example #7
Source File: SnapshotIndexDeletionPolicy.java    From incubator-retired-blur with Apache License 2.0
private synchronized void storeGenerations() throws IOException {
  FileSystem fileSystem = _path.getFileSystem(_configuration);
  FileStatus[] listStatus = fileSystem.listStatus(_path);
  SortedSet<FileStatus> existing = new TreeSet<FileStatus>(Arrays.asList(listStatus));
  long currentFile;
  if (!existing.isEmpty()) {
    FileStatus last = existing.last();
    currentFile = Long.parseLong(last.getPath().getName());
  } else {
    currentFile = 0;
  }
  Path path = new Path(_path, buffer(currentFile + 1));
  LOG.info("Creating new snapshot file [{0}]", path);
  FSDataOutputStream outputStream = fileSystem.create(path, false);
  Writer writer = SequenceFile.createWriter(_configuration, outputStream, Text.class, LongWritable.class,
      CompressionType.NONE, null);
  for (Entry<String, Long> e : _namesToGenerations.entrySet()) {
    writer.append(new Text(e.getKey()), new LongWritable(e.getValue()));
  }
  writer.close();
  outputStream.close();
  cleanupOldFiles(fileSystem, existing);
}
 
Example #8
Source File: JHLogAnalyzer.java    From hadoop with Apache License 2.0
public void run() {
  try {
    for(int i=start; i < end; i++) {
      String name = getFileName(i);
      Path controlFile = new Path(INPUT_DIR, "in_file_" + name);
      SequenceFile.Writer writer = null;
      try {
        writer = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
                                           Text.class, LongWritable.class,
                                           CompressionType.NONE);
        String logFile = jhLogFiles[i].getPath().toString();
        writer.append(new Text(logFile), new LongWritable(0));
      } catch(Exception e) {
        throw new IOException(e);
      } finally {
        if (writer != null)
          writer.close();
        writer = null;
      }
    }
  } catch(IOException ex) {
    LOG.error("FileCreateDaemon failed.", ex);
  }
  numFinishedThreads++;
}
 
Example #9
Source File: DFSGeneralTest.java    From RDFS with Apache License 2.0
public void control(JobConf fsConfig, String fileName)
    throws IOException {
  String name = fileName;
  FileSystem fs = FileSystem.get(fsConfig);

  SequenceFile.Writer write = null;
  for (int i = 0; i < nmaps; i++) {
    try {
      Path controlFile = new Path(dfs_input, name + i);
      write = SequenceFile.createWriter(fs, fsConfig, controlFile,
          Text.class, Text.class, CompressionType.NONE);
      write.append(new Text(name + i), new Text(workdir));
    } finally {
      if (write != null)
        write.close();
      write = null;
    }
  }
}
 
Example #10
Source File: TestSequenceFile.java    From hadoop with Apache License 2.0
private void writeMetadataTest(FileSystem fs, int count, int seed, Path file, 
                                      CompressionType compressionType, CompressionCodec codec, SequenceFile.Metadata metadata)
  throws IOException {
  fs.delete(file, true);
  LOG.info("creating " + count + " records with metadata and with " + compressionType +
           " compression");
  SequenceFile.Writer writer = 
    SequenceFile.createWriter(fs, conf, file, 
                              RandomDatum.class, RandomDatum.class, compressionType, codec, null, metadata);
  RandomDatum.Generator generator = new RandomDatum.Generator(seed);
  for (int i = 0; i < count; i++) {
    generator.next();
    RandomDatum key = generator.getKey();
    RandomDatum value = generator.getValue();

    writer.append(key, value);
  }
  writer.close();
}
 
Example #11
Source File: TestSequenceFile.java    From hadoop with Apache License 2.0
public void testRecursiveSeqFileCreate() throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  Path name = new Path(new Path(System.getProperty("test.build.data","."),
      "recursiveCreateDir") , "file");
  boolean createParent = false;

  try {
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
        RandomDatum.class, 512, (short) 1, 4096, createParent,
        CompressionType.NONE, null, new Metadata());
    fail("Expected an IOException due to missing parent");
  } catch (IOException ioe) {
    // Expected
  }

  createParent = true;
  SequenceFile.createWriter(fs, conf, name, RandomDatum.class,
      RandomDatum.class, 512, (short) 1, 4096, createParent,
      CompressionType.NONE, null, new Metadata());
  // should succeed, fails if exception thrown
}
 
Example #12
Source File: TestBucketWriter.java    From mt-flume with Apache License 2.0
@Test
public void testFileSuffixNotGiven() throws IOException, InterruptedException {
    final int ROLL_INTERVAL = 1000; // seconds. Make sure it doesn't change in the course of the test
    final String suffix = null;

    MockHDFSWriter hdfsWriter = new MockHDFSWriter();
    BucketWriter bucketWriter = new BucketWriter(ROLL_INTERVAL, 0, 0, 0, ctx,
        "/tmp", "file", "", ".tmp", suffix, null, SequenceFile.CompressionType.NONE, hdfsWriter,
        timedRollerPool, null,
        new SinkCounter("test-bucket-writer-" + System.currentTimeMillis()),
        0, null, null, 30000, Executors.newSingleThreadExecutor());

    // Need to override system time use for test so we know what to expect
    final long testTime = System.currentTimeMillis();
    Clock testClock = new Clock() {
        public long currentTimeMillis() {
            return testTime;
        }
    };
    bucketWriter.setClock(testClock);

    Event e = EventBuilder.withBody("foo", Charsets.UTF_8);
    bucketWriter.append(e);

    Assert.assertTrue("Incorrect suffix", hdfsWriter.getOpenedFilePath().endsWith(Long.toString(testTime+1) + ".tmp"));
}
 
Example #13
Source File: SimpleSequenceFileCompressCreator.java    From marklogic-contentpump with Apache License 2.0
public static void main(String args[]) throws Exception {
    System.out.println("Sequence File Creator");
    String uri = args[0]; // output sequence file name
    String filePath = args[1]; // text file to read from; Odd line is key,
                               // even line is value

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(uri), conf);
    Path path = new Path(uri);
    SequenceFile.Writer writer = null;
    SimpleSequenceFileKey key = new SimpleSequenceFileKey();

    String line = null;

    try (BufferedReader buffer = 
             new BufferedReader(new FileReader(filePath))) {
        SimpleSequenceFileValue<Text> value = 
             new SimpleSequenceFileValue<Text>();
        writer = SequenceFile.createWriter(fs, conf, path, key.getClass(),
            value.getClass(), CompressionType.RECORD, new GzipCodec());
        while ((line = buffer.readLine()) != null) {
            key.setDocumentURI(new DocumentURI(line));
            if ((line = buffer.readLine()) == null) {
                break;
            }
            value.setValue(new Text(line));
            writer.append(key, value);
        }
    } finally {
        IOUtils.closeStream(writer);
    }
}
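
Reading a file written this way does not require repeating the codec or compression type; SequenceFile.Reader recovers both from the file header. Below is a minimal, generic read-back sketch, assuming the stored key and value classes implement Writable and taking the file path from the command line; it is not part of the marklogic-contentpump source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.ReflectionUtils;

public class SequenceFileReadBack {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]); // sequence file to read

        SequenceFile.Reader reader =
            new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
        try {
            // The header records how the file was compressed and which
            // key/value classes were used to write it.
            System.out.println("Compression type: " + reader.getCompressionType());
            Writable key =
                (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
            Writable value =
                (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
            while (reader.next(key, value)) {
                System.out.println(key + "\t" + value);
            }
        } finally {
            reader.close();
        }
    }
}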
 
Example #14
Source File: BloomMapFile.java    From hadoop with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    Class<? extends WritableComparable> keyClass,
    Class valClass, CompressionType compress,
    Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass), 
       compression(compress), progressable(progress));
}
 
Example #15
Source File: MapFile.java    From hadoop with Apache License 2.0
/** Create the named map using the named key comparator. 
 * @deprecated Use Writer(Configuration, Path, Option...) instead.
 */
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
              WritableComparator comparator, Class valClass,
              SequenceFile.CompressionType compress, CompressionCodec codec,
              Progressable progress) throws IOException {
  this(conf, new Path(dirName), comparator(comparator),
       valueClass(valClass), compression(compress, codec),
       progressable(progress));
}
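
The deprecation note above points at the Option-based MapFile.Writer constructor. A minimal sketch of the replacement call follows; the directory, key/value classes, and codec are placeholders chosen for illustration, not taken from the Hadoop source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;

public class MapFileOptionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp/example.map"); // placeholder MapFile directory

    MapFile.Writer writer = new MapFile.Writer(conf, dir,
        MapFile.Writer.keyClass(Text.class),
        MapFile.Writer.valueClass(IntWritable.class),
        MapFile.Writer.compression(CompressionType.BLOCK, new DefaultCodec()));
    try {
      // MapFile keys must be appended in sorted order.
      writer.append(new Text("001"), new IntWritable(1));
      writer.append(new Text("002"), new IntWritable(2));
    } finally {
      writer.close();
    }
  }
}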
 
Example #16
Source File: BloomMapFile.java    From big-c with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    WritableComparator comparator, Class valClass, CompressionType compress)
    throws IOException {
  this(conf, new Path(dirName), comparator(comparator), 
       valueClass(valClass), compression(compress));
}
 
Example #17
Source File: ReduceTask.java    From hadoop-gpu with Apache License 2.0
@SuppressWarnings("unchecked")
private void writeSkippedRec(KEY key, VALUE value) throws IOException{
  if(skipWriter==null) {
    Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
    Path skipFile = new Path(skipDir, getTaskID().toString());
    skipWriter = SequenceFile.createWriter(
          skipFile.getFileSystem(conf), conf, skipFile,
          keyClass, valClass, 
          CompressionType.BLOCK, reporter);
  }
  skipWriter.append(key, value);
}
 
Example #18
Source File: MapFile.java    From hadoop with Apache License 2.0
/** Create the named map for keys of the named class. 
 * @deprecated Use Writer(Configuration, Path, Option...) instead.
 */
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
              Class<? extends WritableComparable> keyClass, Class valClass,
              CompressionType compress, CompressionCodec codec,
              Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass),
       compression(compress, codec), progressable(progress));
}
 
Example #19
Source File: ReduceTask.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
private void writeSkippedRec(KEY key, VALUE value) throws IOException{
  if(skipWriter==null) {
    Path skipDir = SkipBadRecords.getSkipOutputPath(conf);
    Path skipFile = new Path(skipDir, getTaskID().toString());
    skipWriter = SequenceFile.createWriter(
          skipFile.getFileSystem(conf), conf, skipFile,
          keyClass, valClass, 
          CompressionType.BLOCK, reporter);
  }
  skipWriter.append(key, value);
}
 
Example #20
Source File: HadoopExternalSorter.java    From beam with Apache License 2.0
/**
 * Initializes the hadoop sorter. Does some local file system setup, and is somewhat expensive
 * (~20 ms on local machine). Only executed when necessary.
 */
private void initHadoopSorter() throws IOException {
  if (!initialized) {
    tempDir = new Path(options.getTempLocation(), "tmp" + UUID.randomUUID().toString());
    paths = new Path[] {new Path(tempDir, "test.seq")};

    JobConf conf = new JobConf();
    // Sets directory for intermediate files created during merge of merge sort
    conf.set("io.seqfile.local.dir", tempDir.toUri().getPath());

    writer =
        SequenceFile.createWriter(
            conf,
            Writer.valueClass(BytesWritable.class),
            Writer.keyClass(BytesWritable.class),
            Writer.file(paths[0]),
            Writer.compression(CompressionType.NONE));

    FileSystem fs = FileSystem.getLocal(conf);
    // Directory has to exist for Hadoop to recognize it as deletable on exit
    fs.mkdirs(tempDir);
    fs.deleteOnExit(tempDir);

    sorter =
        new SequenceFile.Sorter(
            fs, new BytesWritable.Comparator(), BytesWritable.class, BytesWritable.class, conf);
    sorter.setMemory(options.getMemoryMB() * 1024 * 1024);

    initialized = true;
  }
}
 
Example #21
Source File: SequenceFileOutputFormat.java    From big-c with Apache License 2.0
/**
 * Set the {@link CompressionType} for the output {@link SequenceFile}.
 * @param conf the {@link JobConf} to modify
 * @param style the {@link CompressionType} for the output
 *              {@link SequenceFile} 
 */
public static void setOutputCompressionType(JobConf conf, 
                                          CompressionType style) {
  setCompressOutput(conf, true);
  conf.set(org.apache.hadoop.mapreduce.lib.output.
    FileOutputFormat.COMPRESS_TYPE, style.toString());
}
 
Example #22
Source File: BloomMapFile.java    From hadoop with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    WritableComparator comparator, Class valClass, CompressionType compress)
    throws IOException {
  this(conf, new Path(dirName), comparator(comparator), 
       valueClass(valClass), compression(compress));
}
 
Example #23
Source File: HdfsMrsPyramidOutputFormat.java    From mrgeo with Apache License 2.0
protected MapFile.Writer createMapFileWriter(TaskAttemptContext context, CompressionCodec codec,
    CompressionType compressionType, Path file) throws IOException
{
  return new MapFile.Writer(context.getConfiguration(), file,
      MapFile.Writer.keyClass(context.getOutputKeyClass().asSubclass(WritableComparable.class)),
      MapFile.Writer.valueClass(context.getOutputValueClass().asSubclass(Writable.class)),
      MapFile.Writer.compression(compressionType, codec),
      MapFile.Writer.progressable(context));
}
 
Example #24
Source File: SequenceFileOutputFormat.java    From hadoop with Apache License 2.0
/**
 * Set the {@link CompressionType} for the output {@link SequenceFile}.
 * @param job the {@link Job} to modify
 * @param style the {@link CompressionType} for the output
 *              {@link SequenceFile} 
 */
public static void setOutputCompressionType(Job job, 
                                          CompressionType style) {
  setCompressOutput(job, true);
  job.getConfiguration().set(FileOutputFormat.COMPRESS_TYPE, 
                             style.toString());
}
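
A minimal sketch of how this setter is usually combined with SequenceFileOutputFormat when configuring a job; the job name, output path, key/value classes, and codec are placeholders, and the mapper, reducer, and input setup are omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class CompressedSeqFileJobSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "compressed-seqfile"); // placeholder name

    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    // setOutputCompressionType also enables output compression for the job.
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    FileOutputFormat.setOutputCompressorClass(job, DefaultCodec.class);

    FileOutputFormat.setOutputPath(job, new Path(args[0])); // placeholder output path
    // Mapper, reducer, and input configuration omitted from this sketch.
  }
}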
 
Example #25
Source File: BloomMapFile.java    From big-c with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    Class<? extends WritableComparable> keyClass,
    Class<? extends Writable> valClass, CompressionType compress,
    CompressionCodec codec, Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass), 
       compression(compress, codec), progressable(progress));
}
 
Example #26
Source File: SequenceFileOutputFormat.java    From RDFS with Apache License 2.0
public RecordWriter<K, V> getRecordWriter(
                                        FileSystem ignored, JobConf job,
                                        String name, Progressable progress)
  throws IOException {
  // get the path of the temporary output file 
  Path file = FileOutputFormat.getTaskOutputPath(job, name);
  
  FileSystem fs = file.getFileSystem(job);
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(job)) {
    // find the kind of compression to do
    compressionType = getOutputCompressionType(job);

    // find the right codec
    Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, DefaultCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job);
  }
  final SequenceFile.Writer out = 
    SequenceFile.createWriter(fs, job, file,
                              job.getOutputKeyClass(),
                              job.getOutputValueClass(),
                              compressionType,
                              codec,
                              progress);

  return new RecordWriter<K, V>() {

      public void write(K key, V value)
        throws IOException {

        out.append(key, value);
      }

      public void close(Reporter reporter) throws IOException { out.close();}
    };
}
 
Example #27
Source File: SequenceFileAsBinaryOutputFormat.java    From hadoop with Apache License 2.0
@Override 
public void checkOutputSpecs(JobContext job) throws IOException {
  super.checkOutputSpecs(job);
  if (getCompressOutput(job) && 
      getOutputCompressionType(job) == CompressionType.RECORD ) {
    throw new InvalidJobConfException("SequenceFileAsBinaryOutputFormat "
      + "doesn't support Record Compression" );
  }
}
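
In practice this means a job writing through SequenceFileAsBinaryOutputFormat has to stick to NONE or BLOCK compression. Here is a small sketch of a configuration that would pass the check above; the job name is a placeholder and the rest of the job setup is omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat;

public class BinarySeqFileCompressionSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "binary-seqfile"); // placeholder name

    job.setOutputFormatClass(SequenceFileAsBinaryOutputFormat.class);

    // BLOCK (or NONE) passes checkOutputSpecs; RECORD would trigger the
    // InvalidJobConfException shown above.
    SequenceFileAsBinaryOutputFormat.setCompressOutput(job, true);
    SequenceFileAsBinaryOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    // Output path, key/value classes, mapper, and reducer omitted from this sketch.
  }
}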
 
Example #28
Source File: BloomMapFile.java    From big-c with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    WritableComparator comparator, Class valClass,
    CompressionType compress, CompressionCodec codec, Progressable progress)
    throws IOException {
  this(conf, new Path(dirName), comparator(comparator), 
       valueClass(valClass), compression(compress, codec), 
       progressable(progress));
}
 
Example #29
Source File: TestArrayFile.java    From hadoop with Apache License 2.0
/**
 * Test the {@link ArrayFile.Reader} iteration methods
 * {@code next()} and {@code seek()}, both in and out of range.
 */
public void testArrayFileIteration() {
  int SIZE = 10;
  Configuration conf = new Configuration();    
  try {
    FileSystem fs = FileSystem.get(conf);
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, TEST_FILE, 
        LongWritable.class, CompressionType.RECORD, defaultProgressable);
    assertNotNull("testArrayFileIteration error !!!", writer);
    
    for (int i = 0; i < SIZE; i++)
      writer.append(new LongWritable(i));
    
    writer.close();
    
    ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
    LongWritable nextWritable = new LongWritable(0);
    
    for (int i = 0; i < SIZE; i++) {
      nextWritable = (LongWritable)reader.next(nextWritable);
      assertEquals(nextWritable.get(), i);
    }
      
    assertTrue("testArrayFileIteration seek error !!!",
        reader.seek(new LongWritable(6)));
    nextWritable = (LongWritable) reader.next(nextWritable);
    assertTrue("testArrayFileIteration error !!!", reader.key() == 7);
    assertTrue("testArrayFileIteration error !!!",
        nextWritable.equals(new LongWritable(7)));
    assertFalse("testArrayFileIteration error !!!",
        reader.seek(new LongWritable(SIZE + 5)));
    reader.close();
  } catch (Exception ex) {
    fail("testArrayFileWriterConstruction error !!!");
  }
}
 
Example #30
Source File: BloomMapFile.java    From hadoop with Apache License 2.0
@Deprecated
public Writer(Configuration conf, FileSystem fs, String dirName,
    Class<? extends WritableComparable> keyClass,
    Class<? extends Writable> valClass, CompressionType compress,
    CompressionCodec codec, Progressable progress) throws IOException {
  this(conf, new Path(dirName), keyClass(keyClass), valueClass(valClass), 
       compression(compress, codec), progressable(progress));
}