Java Code Examples for org.apache.hadoop.fs.swift.util.SwiftTestUtils#dataset()

The following examples show how to use org.apache.hadoop.fs.swift.util.SwiftTestUtils#dataset(). Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
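
All of these tests seed files with SwiftTestUtils.dataset(len, base, modulo), which returns a byte array of length len filled with a predictable repeating pattern, so the expected byte at any offset can be computed and asserted after a seek. The sketch below is an illustrative stand-in for that helper (the class name DatasetSketch and method makeDataset are invented here for demonstration; the real Hadoop implementation may differ in detail):

// Illustrative sketch only: an equivalent of SwiftTestUtils.dataset(len, base, modulo).
// Each byte is (byte) (base + offset % modulo), giving an offset => value rule that
// the seek tests rely on (e.g. dataset(65536, 0, 255) starts 0, 1, 2, ...).
public final class DatasetSketch {

  private DatasetSketch() {
  }

  /** Build a byte array whose value at index i is (byte) (base + i % modulo). */
  public static byte[] makeDataset(int len, int base, int modulo) {
    byte[] dataset = new byte[len];
    for (int i = 0; i < len; i++) {
      dataset[i] = (byte) (base + (i % modulo));
    }
    return dataset;
  }

  public static void main(String[] args) {
    byte[] block = makeDataset(65536, 0, 255);
    // Prints 0 1 2, matching the assertions in the seek tests below.
    System.out.println(block[0] + " " + block[1] + " " + block[2]);
  }
}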
Example 1
Source File: TestSeek.java    From sahara-extra with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 2
Source File: TestSeek.java    From hadoop with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 3
Source File: TestSeek.java    From big-c with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testPositionedBulkReadDoesntChangePosition() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  instream.seek(39999);
  assertTrue(-1 != instream.read());
  assertEquals (40000, instream.getPos());

  byte[] readBuffer = new byte[256];
  instream.read(128, readBuffer, 0, readBuffer.length);
  //the stream position should be unchanged by the positioned read
  assertEquals(40000, instream.getPos());
  //content is the same too
  assertEquals("@40000", block[40000], (byte) instream.read());
  //now verify the picked up data
  for (int i = 0; i < 256; i++) {
    assertEquals("@" + i, block[i + 128], readBuffer[i]);
  }
}
 
Example 4
Source File: TestSeek.java    From big-c with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testSeekBigFile() throws Throwable {
  Path testSeekFile = new Path(testPath, "bigseekfile.txt");
  byte[] block = SwiftTestUtils.dataset(65536, 0, 255);
  createFile(testSeekFile, block);
  instream = fs.open(testSeekFile);
  assertEquals(0, instream.getPos());
  //expect that seek to 0 works
  instream.seek(0);
  int result = instream.read();
  assertEquals(0, result);
  assertEquals(1, instream.read());
  assertEquals(2, instream.read());

  //do seek 32KB ahead
  instream.seek(32768);
  assertEquals("@32768", block[32768], (byte) instream.read());
  instream.seek(40000);
  assertEquals("@40000", block[40000], (byte) instream.read());
  instream.seek(8191);
  assertEquals("@8191", block[8191], (byte) instream.read());
  instream.seek(0);
  assertEquals("@0", 0, (byte) instream.read());
}
 
Example 5
Source File: TestReadPastBuffer.java    From sahara-extra with Apache License 2.0
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  byte[] block = SwiftTestUtils.dataset(SEEK_FILE_LEN, 0, 255);

  //delete the test directory
  testPath = path("/test");
  readFile = new Path(testPath, "TestReadPastBuffer.txt");
  createFile(readFile, block);
}
 
Example 6
Source File: TestSwiftFileSystemPartitionedUploads.java    From hadoop with Apache License 2.0
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 7
Source File: TestSeek.java    From sahara-extra with Apache License 2.0
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  //delete the test directory
  testPath = path("/test");
  smallSeekFile = new Path(testPath, "seekfile.txt");
  zeroByteFile = new Path(testPath, "zero.txt");
  byte[] block = SwiftTestUtils.dataset(SMALL_SEEK_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(smallSeekFile, block);
  createEmptyFile(zeroByteFile);
}
 
Example 8
Source File: TestSeek.java    From hadoop with Apache License 2.0
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  //delete the test directory
  testPath = path("/test");
  smallSeekFile = new Path(testPath, "seekfile.txt");
  zeroByteFile = new Path(testPath, "zero.txt");
  byte[] block = SwiftTestUtils.dataset(SMALL_SEEK_FILE_LEN, 0, 255);
  //this file now has a simple rule: offset => value
  createFile(smallSeekFile, block);
  createEmptyFile(zeroByteFile);
}
 
Example 9
Source File: TestSwiftFileSystemPartitionedUploads.java    From sahara-extra with Apache License 2.0
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 10
Source File: TestSwiftFileSystemPartitionedUploads.java    From sahara-extra with Apache License 2.0
/**
 * Test writing a partitioned file when the path is fully qualified.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testQualifiedPath() throws Throwable {
  final Path path = path("/test/qualifiedPath");
  int len = PART_SIZE_BYTES * 4;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 11
Source File: TestReadPastBuffer.java    From hadoop with Apache License 2.0
/**
 * Setup creates dirs under test/hadoop
 *
 * @throws Exception
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  byte[] block = SwiftTestUtils.dataset(SEEK_FILE_LEN, 0, 255);

  //delete the test directory
  testPath = path("/test");
  readFile = new Path(testPath, "TestReadPastBuffer.txt");
  createFile(readFile, block);
}
 
Example 12
Source File: TestSwiftFileSystemPartitionedUploads.java    From big-c with Apache License 2.0
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Example 13
Source File: TestSwiftFileSystemPartitionedUploads.java    From big-c with Apache License 2.0
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testOverwritePartitionedFile() throws Throwable {
  final Path path = new Path("/test/testOverwritePartitionedFile");

  final int len1 = 8192;
  final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     1024);
  out.write(src1, 0, len1);
  out.close();
  long expected = getExpectedPartitionsWritten(len1,
                                               PART_SIZE_BYTES,
                                               false);
  assertPartitionsWritten("initial upload", out, expected);
  assertExists("Exists", path);
  FileStatus status = fs.getFileStatus(path);
  assertEquals("Length", len1, status.getLen());
  //now write a shorter file with a different dataset
  final int len2 = 4095;
  final byte[] src2 = SwiftTestUtils.dataset(len2, 'a', 'z');
  out = fs.create(path,
                  true,
                  getBufferSize(),
                  (short) 1,
                  1024);
  out.write(src2, 0, len2);
  out.close();
  status = fs.getFileStatus(path);
  assertEquals("Length", len2, status.getLen());
  byte[] dest = readDataset(fs, path, len2);
  //compare data
  SwiftTestUtils.compareByteArrays(src2, dest, len2);
}
 
Example 14
Source File: TestSwiftFileSystemPartitionedUploads.java    From sahara-extra with Apache License 2.0
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testOverwritePartitionedFile() throws Throwable {
  final Path path = new Path("/test/testOverwritePartitionedFile");

  final int len1 = 8192;
  final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     1024);
  out.write(src1, 0, len1);
  out.close();
  long expected = getExpectedPartitionsWritten(len1,
                                               PART_SIZE_BYTES,
                                               false);
  assertPartitionsWritten("initial upload", out, expected);
  assertExists("Exists", path);
  FileStatus status = fs.getFileStatus(path);
  assertEquals("Length", len1, status.getLen());
  //now write a shorter file with a different dataset
  final int len2 = 4095;
  final byte[] src2 = SwiftTestUtils.dataset(len2, 'a', 'z');
  out = fs.create(path,
                  true,
                  getBufferSize(),
                  (short) 1,
                  1024);
  out.write(src2, 0, len2);
  out.close();
  status = fs.getFileStatus(path);
  assertEquals("Length", len2, status.getLen());
  byte[] dest = readDataset(fs, path, len2);
  //compare data
  SwiftTestUtils.compareByteArrays(src2, dest, len2);
}
 
Example 15
Source File: TestSwiftFileSystemPartitionedUploads.java    From big-c with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of test -which seems to play up on partitions, which we download
    //to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 16
Source File: TestSwiftFileSystemPartitionedUploads.java    From sahara-extra with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of test -which seems to play up on partitions, which we download
    //to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 17
Source File: TestSwiftFileSystemPartitionedUploads.java    From sahara-extra with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {

  final Path path = new Path("/test/testFilePartUploadLengthCheck");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);

    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 18
Source File: TestSwiftFileSystemPartitionedUploads.java    From big-c with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {

  final Path path = new Path("/test/testFilePartUploadLengthCheck");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);

    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 19
Source File: TestSwiftFileSystemPartitionedUploads.java    From hadoop with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {

  final Path path = new Path("/test/testFilePartUploadLengthCheck");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);

    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Example 20
Source File: TestSwiftFileSystemPartitionedUploads.java    From hadoop with Apache License 2.0
/**
 * tests functionality for big files ( > 5Gb) upload
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of test -which seems to play up on partitions, which we download
    //to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}