org.apache.hadoop.hdfs.TestFileCreation Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.TestFileCreation.
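Every example on this page goes through two static helpers on TestFileCreation: createFile, which opens a file with an explicit replication factor and returns the still-open output stream, and writeFile, which pushes bytes into that stream without closing it. For orientation, here is a minimal sketch of those helpers; the signatures follow the calls in the examples, but the bodies, the BLOCK_SIZE value, and the class name are simplified assumptions rather than the actual Hadoop test code.

import java.io.IOException;
import java.util.Random;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical stand-in for the TestFileCreation helpers used below.
class FileCreationHelpers {
  static final long BLOCK_SIZE = 8192; // illustrative; the real tests configure their own

  static FSDataOutputStream createFile(FileSystem fs, Path name, int repl)
      throws IOException {
    // Explicit buffer size, replication, and block size, so the caller can
    // reason about block boundaries. The stream is returned still open.
    return fs.create(name, true, 4096, (short) repl, BLOCK_SIZE);
  }

  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
    // Write 'size' bytes of pseudo-random data without closing the stream,
    // so the data becomes visible to readers before the file is finalized.
    byte[] buffer = new byte[size];
    new Random().nextBytes(buffer);
    stm.write(buffer, 0, size);
  }
}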
Example #1
Source File: TestBlockUnderConstruction.java From hadoop with Apache License 2.0
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;

  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
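The helper above reads several fields from its enclosing test class (hdfs, BLOCK_SIZE, NUM_BLOCKS; later examples also use BASE_DIR and cluster). A plausible JUnit 4 fixture supplying them, assuming a MiniDFSCluster; the field names come from the examples, the values are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Before;

// Hypothetical fixture; the real TestBlockUnderConstruction wires this up
// in its own setup methods.
class BlockUnderConstructionFixtureSketch {
  static final String BASE_DIR = "/test/blockUnderConstruction"; // illustrative
  static final int BLOCK_SIZE = 8192;                            // illustrative
  static final int NUM_BLOCKS = 5;                               // illustrative
  MiniDFSCluster cluster;
  DistributedFileSystem hdfs;

  @Before
  public void setUp() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Use a small block size so tests cross block boundaries quickly.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws IOException {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}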
Example #2
Source File: TestBlockUnderConstruction.java From hadoop with Apache License 2.0
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
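verifyFileBlocks is a private helper of the same test class and is not reproduced on this page; it asserts that the file's last block is in an under-construction state exactly while the file is open, and complete once it is closed. A client-side approximation of that check, using the hypothetical fixture sketched above:

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical approximation of verifyFileBlocks: an open file's located
// blocks are reported as under construction; a closed file's are not.
void verifyLastBlockState(String src, boolean isFileOpen) throws IOException {
  LocatedBlocks lbs = DFSClientAdapter.getDFSClient(hdfs)
      .getLocatedBlocks(src, 0L, (long) BLOCK_SIZE * NUM_BLOCKS);
  assertEquals(isFileOpen, lbs.isUnderConstruction());
}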
Example #3
Source File: TestBlockUnderConstruction.java From big-c with Apache License 2.0
void writeFile(Path file, FSDataOutputStream stm, int size)
    throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;

  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Example #4
Source File: TestBlockUnderConstruction.java From big-c with Apache License 2.0
@Test
public void testBlockCreation() throws IOException {
  Path file1 = new Path(BASE_DIR, "file1.dat");
  FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);

  for (int idx = 0; idx < NUM_BLOCKS; idx++) {
    // write one block
    writeFile(file1, out, BLOCK_SIZE);
    // verify consistency
    verifyFileBlocks(file1.toString(), true);
  }

  // close file
  out.close();
  // verify consistency
  verifyFileBlocks(file1.toString(), false);
}
Example #5
Source File: TestBlockUnderConstruction.java From hadoop with Apache License 2.0
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }

  // close file
  out.close();
}
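A detail that is easy to miss here: the length argument to getBlockLocations bounds which blocks are returned, since only blocks overlapping [offset, offset + length) are reported. That is why the test grows len by BLOCK_SIZE after every additional block. A small hypothetical helper that makes the windowing explicit:

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

// Hypothetical helper: how many blocks the NameNode reports inside a read
// window. A window shorter than the file hides the blocks beyond it.
static int visibleBlocks(NamenodeProtocols namenode, String src, long length)
    throws IOException {
  return namenode.getBlockLocations(src, 0, length).getLocatedBlocks().size();
}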
Example #6
Source File: TestBlockUnderConstruction.java From big-c with Apache License 2.0
/**
 * Test NameNode.getBlockLocations(..) on reading un-closed files.
 */
@Test
public void testGetBlockLocations() throws IOException {
  final NamenodeProtocols namenode = cluster.getNameNodeRpc();
  final Path p = new Path(BASE_DIR, "file2.dat");
  final String src = p.toString();
  final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

  // write a half block
  int len = BLOCK_SIZE >>> 1;
  writeFile(p, out, len);

  for (int i = 1; i < NUM_BLOCKS; ) {
    // verify consistency
    final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
    final List<LocatedBlock> blocks = lb.getLocatedBlocks();
    assertEquals(i, blocks.size());
    final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
    assertTrue(b instanceof BlockInfoContiguousUnderConstruction);

    if (++i < NUM_BLOCKS) {
      // write one more block
      writeFile(p, out, BLOCK_SIZE);
      len += BLOCK_SIZE;
    }
  }

  // close file
  out.close();
}
Example #7
Source File: TestAvatarDataNodeMultipleRegistrations.java From RDFS with Apache License 2.0
/**
 * Test that file data becomes available before file is closed.
 */
@Test
public void testFileCreation() throws IOException, ConfigException,
    InterruptedException {
  Configuration conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniAvatarCluster cluster = new MiniAvatarCluster(conf, 1, true, null, null,
      3, true);
  try {
    for (int i = 0; i < cluster.getNumNameNodes(); i++) {
      FileSystem fs = cluster.getFileSystem(i);

      //
      // check that / exists
      //
      Path path = new Path("/");
      System.out.println("Path : \"" + path.toString() + "\"");
      System.out.println(fs.getFileStatus(path).isDir());
      assertTrue("/ should be a directory",
          fs.getFileStatus(path).isDir() == true);

      //
      // Create a directory inside /, then try to overwrite it
      //
      Path dir1 = new Path("/test_dir");
      fs.mkdirs(dir1);
      System.out.println("createFile: Creating " + dir1.getName()
          + " for overwrite of existing directory.");
      try {
        fs.create(dir1, true); // Create path, overwrite=true
        fs.close();
        assertTrue("Did not prevent directory from being overwritten.", false);
      } catch (IOException ie) {
        if (!ie.getMessage().contains("already exists as a directory."))
          throw ie;
      }

      //
      // create a new file in home directory. Do not close it.
      //
      Path file1 = new Path("filestatus.dat");
      FSDataOutputStream stm = TestFileCreation.createFile(fs, file1, 1);

      // verify that file exists in FS namespace
      assertTrue(file1 + " should be a file",
          fs.getFileStatus(file1).isDir() == false);
      System.out.println("Path : \"" + file1 + "\"");

      // write to file
      TestFileCreation.writeFile(stm);

      // Make sure a client can read it before it is closed.
      checkFile(fs, file1, 1);

      // verify that file size has changed
      long len = fs.getFileStatus(file1).getLen();
      assertTrue(file1 + " should be of size " + (numBlocks * blockSize)
          + " but found to be of size " + len, len == numBlocks * blockSize);

      stm.close();

      // verify that file size has changed to the full size
      len = fs.getFileStatus(file1).getLen();
      assertTrue(file1 + " should be of size " + fileSize
          + " but found to be of size " + len, len == fileSize);

      // Check storage usage
      // can't check capacities for real storage since the OS file system may
      // be changing under us.
      if (simulatedStorage) {
        AvatarDataNode dn = cluster.getDataNodes().get(0);
        int namespaceId = cluster.getNameNode(i).avatars.get(0).avatar
            .getNamespaceID();
        assertEquals(fileSize, dn.getFSDataset().getNSUsed(namespaceId));
        // Because all namespaces share the same simulated dataset
        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY - fileSize * (i + 1),
            dn.getFSDataset().getRemaining());
      }
    }
  } finally {
    cluster.shutDown();
  }
}
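checkFile is a helper of this test class that is not shown on this page; in spirit it opens a fresh reader and verifies that the data written so far is already visible even though the writer has not closed the file. A rough sketch under that assumption (the real helper also validates the byte contents):

import static org.junit.Assert.assertEquals;

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of checkFile: bytes flushed before close must be
// readable by a new client, and replication should match the request.
static void checkFile(FileSystem fs, Path name, int repl) throws IOException {
  assertEquals(repl, fs.getFileStatus(name).getReplication());
  FSDataInputStream in = fs.open(name);
  try {
    byte[] buf = new byte[4096];
    long total = 0;
    for (int n; (n = in.read(buf)) > 0; ) {
      total += n; // count every byte a concurrent reader can see
    }
    assertEquals(fs.getFileStatus(name).getLen(), total);
  } finally {
    in.close();
  }
}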