Java Code Examples for org.apache.hadoop.io.file.tfile.TFile.Writer

The following examples show how to use org.apache.hadoop.io.file.tfile.TFile.Writer. They are extracted from open source projects; each example lists its source project, source file, and license.
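Taken together, the examples share one lifecycle: create an FSDataOutputStream, wrap it in a Writer, append key/value pairs, close the Writer, and only then close the stream; reading mirrors this with a Reader and a Scanner. Below is a minimal sketch of that round trip. The path, block size, codec ("gz"), and comparator ("memcmp") are illustrative assumptions rather than values taken from any one example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.tfile"); // hypothetical path
    FileSystem fs = path.getFileSystem(conf);

    // Write: the five-argument constructor is (stream, minBlockSize,
    // compression codec, comparator, conf); passing null as the
    // comparator would produce an unsorted TFile instead.
    FSDataOutputStream out = fs.create(path);
    TFile.Writer writer =
        new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf);
    writer.append("key0".getBytes(), "value0".getBytes());
    writer.close(); // always close the Writer before the stream
    out.close();

    // Read back: the Reader needs the stream, the file length, and conf.
    FSDataInputStream in = fs.open(path);
    TFile.Reader reader =
        new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    while (!scanner.atEnd()) {
      TFile.Reader.Scanner.Entry entry = scanner.entry();
      byte[] keyBuf = new byte[entry.getKeyLength()];
      entry.getKey(keyBuf); // copies the current key into keyBuf
      scanner.advance();
    }
    scanner.close();
    reader.close();
    in.close();
    fs.delete(path, true);
  }
}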
Example 1
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private int writePrepWithKnownLength(Writer writer, int start, int n)
    throws IOException {
  // get the length of the key
  String key = String.format(localFormatter, start);
  int keyLen = key.getBytes().length;
  String value = "value" + key;
  int valueLen = value.getBytes().length;
  for (int i = start; i < (start + n); i++) {
    DataOutputStream out = writer.prepareAppendKey(keyLen);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    out = writer.prepareAppendValue(valueLen);
    String localValue = "value" + localKey;
    out.write(localValue.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 2
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
void unsortedWithSomeCodec(String codec) throws IOException {
  Path uTfile = new Path(ROOT, "unsorted.tfile");
  FSDataOutputStream fout = createFSOutput(uTfile);
  Writer writer = new Writer(fout, minBlockSize, codec, null, conf);
  writeRecords(writer);
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(uTfile);
  Reader reader =
      new Reader(fin, fs.getFileStatus(uTfile).getLen(), conf);

  Scanner scanner = reader.createScanner();
  readAllRecords(scanner);
  scanner.close();
  reader.close();
  fin.close();
  fs.delete(uTfile, true);
}
 
Example 3
Source Project: hadoop   Source File: TestTFileSplit.java    License: Apache License 2.0
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 4
Source Project: hadoop   Source File: TestTFileByteArrays.java    License: Apache License 2.0
@Test
public void testFailureFileWriteNotAt0Position() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  out.write(123);

  try {
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
    Assert.fail("Failed to catch file write not at position 0.");
  } catch (Exception e) {
    // noop, expecting exceptions
  }
  closeOutput();
}
 
Example 5
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private int writeSomeRecords(Writer writer, int start, int n)
    throws IOException {
  String value = "value";
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
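    // the same key is appended twice; TFile allows duplicate keys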
    writer.append(key.getBytes(), (value + key).getBytes());
    writer.append(key.getBytes(), (value + key).getBytes());
  }
  return (start + n);
}
 
Example 6
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private int writeLargeRecords(Writer writer, int start, int n)
    throws IOException {
  byte[] value = new byte[largeVal];
  for (int i = start; i < (start + n); i++) {
    String key = String.format(localFormatter, i);
    writer.append(key.getBytes(), value);
    writer.append(key.getBytes(), value);
  }
  return (start + n);
}
 
Example 7
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private void writeEmptyRecords(Writer writer, int n) throws IOException {
  byte[] key = new byte[0];
  byte[] value = new byte[0];
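  // zero-length keys and values are valid TFile records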
  for (int i = 0; i < n; i++) {
    writer.append(key, value);
  }
}
 
Example 8
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private int writePrepWithUnkownLength(Writer writer, int start, int n)
    throws IOException {
  for (int i = start; i < (start + n); i++) {
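    // a length of -1 tells the Writer the key/value length is not known in advance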
    DataOutputStream out = writer.prepareAppendKey(-1);
    String localKey = String.format(localFormatter, i);
    out.write(localKey.getBytes());
    out.close();
    String value = "value" + localKey;
    out = writer.prepareAppendValue(-1);
    out.write(value.getBytes());
    out.close();
  }
  return (start + n);
}
 
Example 9
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private void writeRecords(Writer writer) throws IOException {
  writeEmptyRecords(writer, 10);
  int ret = writeSomeRecords(writer, 0, 100);
  ret = writeLargeRecords(writer, ret, 1);
  ret = writePrepWithKnownLength(writer, ret, 40);
  ret = writePrepWithUnkownLength(writer, ret, 50);
  writer.close();
}
 
Example 10
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private void writeNumMetablocks(Writer writer, String compression, int n)
    throws IOException {
  for (int i = 0; i < n; i++) {
    DataOutputStream dout =
        writer.prepareMetaBlock("TfileMeta" + i, compression);
    byte[] b = ("something to test" + i).getBytes();
    dout.write(b);
    dout.close();
  }
}
 
Example 11
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
private void someTestingWithMetaBlock(Writer writer, String compression)
    throws IOException {
  DataOutputStream dout = null;
  writeNumMetablocks(writer, compression, 10);
  try {
    dout = writer.prepareMetaBlock("TfileMeta1", compression);
    assertTrue(false); // should not be reached
  }
  catch (MetaBlockAlreadyExists me) {
    // expected: a meta block named "TfileMeta1" was already written above
  }
  dout = writer.prepareMetaBlock("TFileMeta100", compression);
  dout.close();
}
 
Example 12
Source Project: hadoop   Source File: TestTFile.java    License: Apache License 2.0
public void testMetaBlocks() throws IOException {
  Path mFile = new Path(ROOT, "meta.tfile");
  FSDataOutputStream fout = createFSOutput(mFile);
  Writer writer = new Writer(fout, minBlockSize, "none", null, conf);
  someTestingWithMetaBlock(writer, "none");
  writer.close();
  fout.close();
  FSDataInputStream fin = fs.open(mFile);
  Reader reader = new Reader(fin, fs.getFileStatus(mFile).getLen(), conf);
  someReadingWithMetaBlock(reader);
  fs.delete(mFile, true);
  reader.close();
  fin.close();
}
 
Example 13
Source Project: hadoop   Source File: TestTFileStreams.java    License: Apache License 2.0
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
 
Example 14
Source Project: hadoop   Source File: TestTFileByteArrays.java    License: Apache License 2.0
@Before
public void setUp() throws IOException {
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
  writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
}
 
Example 15
Source Project: hadoop   Source File: TestTFileByteArrays.java    License: Apache License 2.0
@Test
public void testFailureBadCompressionCodec() throws IOException {
  if (skip)
    return;
  closeOutput();
  out = fs.create(path);
  try {
    writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
    Assert.fail("Error on handling invalid compression codecs.");
  } catch (Exception e) {
    // noop, expecting exceptions
    // e.printStackTrace();
  }
}
 
Example 16
Source Project: hadoop   Source File: TestTFileByteArrays.java    License: Apache License 2.0
static long writeRecords(Writer writer, int count) throws IOException {
  long rawDataSize = 0;
  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
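    // raw size: VInt-encoded length prefix plus the bytes themselves, for key and value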
    rawDataSize +=
        WritableUtils.getVIntSize(key.length) + key.length
            + WritableUtils.getVIntSize(value.length) + value.length;
  }
  return rawDataSize;
}
 
Example 17
Source Project: hadoop   Source File: TestTFileUnsortedByteArrays.java    License: Apache License 2.0
@Override
public void setUp() throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile);
  fs = path.getFileSystem(conf);
  out = fs.create(path);
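  // a null comparator creates an unsorted TFile, so keys need not be in order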
  writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
  writer.append("keyZ".getBytes(), "valueZ".getBytes());
  writer.append("keyM".getBytes(), "valueM".getBytes());
  writer.append("keyN".getBytes(), "valueN".getBytes());
  writer.append("keyA".getBytes(), "valueA".getBytes());
  closeOutput();
}
 
Example 18
Source Project: hadoop   Source File: TestTFileComparators.java    License: Apache License 2.0
public void testFailureBadComparatorNames() throws IOException {
  try {
    writer = new Writer(out, BLOCK_SIZE, compression, "badcmp", conf);
    Assert.fail("Failed to catch unsupported comparator names");
  }
  catch (Exception e) {
    // noop, expecting exceptions
    e.printStackTrace();
  }
}