org.apache.hadoop.io.file.tfile.TFile.Writer Java Examples

The following examples show how to use org.apache.hadoop.io.file.tfile.TFile.Writer, the class that appends key-value pairs (and optional meta blocks) to Hadoop's TFile container format. They are drawn from the test suites and client code of several open-source projects; the source file and project are noted above each example.
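
Before the individual examples, here is a minimal end-to-end sketch of the write/read round trip, modeled on the patterns in the snippets below. The class name, the /tmp path, the block size, and the zero-padded key format are illustrative choices for this sketch, not part of the Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileWriterDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/demo.tfile");  // illustrative location
    FileSystem fs = path.getFileSystem(conf);

    // Write a sorted, gzip-compressed TFile. A sorted TFile requires keys
    // in non-decreasing byte order, which zero-padding guarantees here.
    FSDataOutputStream out = fs.create(path);
    TFile.Writer writer = new TFile.Writer(out, 64 * 1024,
        TFile.COMPRESSION_GZ, TFile.COMPARATOR_MEMCMP, conf);
    for (int i = 0; i < 100; i++) {
      String key = String.format("%05d", i);
      writer.append(key.getBytes(), ("value" + key).getBytes());
    }
    writer.close();  // closes the writer, not the underlying stream
    out.close();

    // Read every record back with a sequential scanner.
    FSDataInputStream in = fs.open(path);
    TFile.Reader reader =
        new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    BytesWritable key = new BytesWritable();
    BytesWritable value = new BytesWritable();
    while (!scanner.atEnd()) {
      scanner.entry().get(key, value);  // copies key and value bytes out
      scanner.advance();
    }
    scanner.close();
    reader.close();  // the input stream is likewise left open
    in.close();
  }
}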
Example #1
Source File: TestTFile.java    From big-c with Apache License 2.0 (identical copies appear in hadoop and hadoop-gpu)
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // keys are zero-padded to a fixed width, so the key and value lengths
  // computed once here are exact for every prepareAppendKey/Value call below
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example #2
Source File: TestTFile.java    From RDFS with Apache License 2.0 (identical copies appear in hadoop, big-c, and hadoop-gpu)
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  // open the file once and hand the same stream to the reader
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
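
A null (or empty) comparator string, as used above, produces an unsorted TFile: records are stored in append order, and a Reader can only scan such a file sequentially rather than seek by key.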
 
Example #3
Source File: TestTFileByteArrays.java    From RDFS with Apache License 2.0 (identical copies appear in hadoop, big-c, and hadoop-gpu)
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);  // the stream is no longer at position 0

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // expected: TFile.Writer requires an output stream positioned at 0
  }
  closeOutput();
}
 
Example #4
Source File: TestTFileSplit.java    From RDFS with Apache License 2.0 (identical copies appear in hadoop, big-c, and hadoop-gpu)
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  // keys are generated in sorted order, as a sorted TFile requires
  for (int nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example #5
Source File: TestTFile.java    From hadoop-gpu with Apache License 2.0 (an identical copy appears in RDFS)
private int writeLargeRecords(Writer writer, int start, int n)
    throws IOException {
  byte[] value = new byte[largeVal];
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    // each key is appended twice; a TFile permits duplicate keys
    // as long as the sequence stays non-decreasing
    writer.append(key.getBytes(), value);
    writer.append(key.getBytes(), value);
  }
  return (start + n);
}
 
Example #6
Source File: TestTFile.java    From hadoop-gpu with Apache License 2.0 (an identical copy appears in RDFS)
private int writeSomeRecords(Writer writer, int start, int n)
    throws IOException {
  String value = "value";
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    // duplicate appends are intentional, mirroring writeLargeRecords
    writer.append(key.getBytes(), (value + key).getBytes());
    writer.append(key.getBytes(), (value + key).getBytes());
  }
  return (start + n);
}
 
Example #7
Source File: TestTFile.java    From RDFS with Apache License 2.0
private void someTestingWithMetaBlock(Writer writer, String compression)
    throws IOException {
  DataOutputStream dout = null;
  writeNumMetablocks(writer, compression, 10);
  try {
    // "TfileMeta1" was already written by writeNumMetablocks above
    dout = writer.prepareMetaBlock("TfileMeta1", compression);
    assertTrue(false);  // should be unreachable
  } catch (MetaBlockAlreadyExists me) {
    // expected: meta block names must be unique within a TFile
  }
  dout = writer.prepareMetaBlock("TFileMeta100", compression);
  dout.close();
}
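
Meta blocks are named, independently compressed side sections of a TFile, written through the same Writer. A brief sketch, assuming the writer and compression variables above (the block name and payload are illustrative):

DataOutputStream meta = writer.prepareMetaBlock("MyAppIndex", compression);
meta.write("any application-defined bytes".getBytes());
meta.close();
// calling writer.prepareMetaBlock("MyAppIndex", compression) again
// would now throw MetaBlockAlreadyExists, as the test above verifies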
 
Example #8
Source File: TestTFileComparators.java    From hadoop-gpu with Apache License 2.0
public void testFailureBadComparatorNames() throws IOException {
  try {
    // "badcmp" is neither "memcmp" nor a "jclass:..." comparator name
    writer = new Writer(out, BLOCK_SIZE, compression, "badcmp", conf);
    Assert.fail("Failed to catch unsupported comparator names");
  }
  catch (Exception e) {
    // noop, expecting exceptions
    e.printStackTrace();
  }
}
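
TFile accepts two kinds of comparator names: the built-in "memcmp" for raw byte-order comparison, or "jclass:" followed by the fully qualified name of a class implementing RawComparator. A sketch with a hypothetical comparator class:

Writer writer = new Writer(out, BLOCK_SIZE, compression,
    "jclass:org.example.MyRawComparator", conf);  // org.example.MyRawComparator is hypothetical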
 
Example #9
Source File: TestTFile.java    From hadoop-gpu with Apache License 2.0 (an identical copy appears in RDFS)
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  // zero-length keys and values are valid TFile records
  byte[] key = new byte[0];
  byte[] value = new byte[0];
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
 
Example #10
Source File: TestTFile.java    From hadoop-gpu with Apache License 2.0 (an identical copy appears in RDFS)
private int writePrepWithUnkownLength(Writer writer, int start, int n)
    throws IOException {
  for (int i = start; i < (start + n); i++) {
    // a length of -1 means the size is not known in advance
    DataOutputStream out = writer.prepareAppendKey(-1);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    String value = "value" + localKey;
    out = writer.prepareAppendValue(-1);
    out.write(value.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example #11
Source File: TestTFileComparators.java    From hadoop-gpu with Apache License 2.0 (an identical copy appears in big-c)
public void testFailureBadJClasses() throws IOException {
  try {
    // Chunk does not implement RawComparator, so this jclass name is rejected
    writer =
        new Writer(out, BLOCK_SIZE, compression,
            "jclass:org.apache.hadoop.io.file.tfile.Chunk", conf);
    Assert.fail("Failed to catch unsupported comparator names");
  }
  catch (Exception e) {
    // noop, expecting exceptions
    e.printStackTrace();
  }
}
 
Example #12
Source File: TestTFile.java    From RDFS with Apache License 2.0
private void writeRecords(Writer writer) throws IOException {
  // exercise every append style in turn, chaining the key ranges together
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
 
Example #13
Source File: TFileRecordWriter.java    From spork with Apache License 2.0
/**
 * Creates a record writer backed by a TFile with a 1 MB minimum block
 * size; the null comparator leaves the TFile unsorted.
 */
public TFileRecordWriter(Path file, String codec, Configuration conf)
                throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    fileOut = fs.create(file, false);  // fail rather than overwrite an existing file
    writer = new Writer(fileOut, 1024 * 1024, codec, null, conf);
}
 
Example #14
Source File: TFileWriter.java    From attic-apex-malhar with Apache License 2.0
public TFileWriter(FSDataOutputStream stream, int minBlockSize, String compressName,
    String comparator, Configuration conf) throws IOException
{
  // keep the stream handle: TFile.Writer.close() does not close it
  this.fsdos = stream;
  writer = new Writer(stream, minBlockSize, compressName, comparator, conf);
}
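
Both wrapper classes above follow the pattern used throughout the test examples: create the FSDataOutputStream first, hand it to the Writer, and close the Writer before the stream, since TFile.Writer.close() leaves the underlying stream open for the caller to close.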
 