org.apache.hadoop.hdfs.server.datanode.DataNode Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.server.datanode.DataNode.
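Most of these examples obtain a DataNode handle from the MiniDFSCluster test harness. As a minimal sketch of that common setup (assuming the Hadoop HDFS test jars are on the classpath), the pattern looks like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Spin up an in-process HDFS cluster with a single DataNode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // DataNode instances are exposed directly for inspection in tests.
      DataNode dn = cluster.getDataNodes().get(0);
      System.out.println("DataNode xfer address: " + dn.getXferAddress());
    } finally {
      cluster.shutdown();
    }
  }
}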
Example #1
Source File: FileChecksumServlets.java From hadoop with Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = new HdfsConfiguration(datanode.getConf());

  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum =
        dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch (IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
Example #2
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
Example #3
Source File: MiniDFSCluster.java From hadoop with Apache License 2.0
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
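Examples #2 and #3 are the same helper carried by two forks. A hedged usage sketch follows; it assumes the cluster was configured to use the simulated dataset (e.g. via SimulatedFSDataset.setFactory(conf)) before being built, and the block values are made up:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;

public class InjectBlocksSketch {
  // Illustrative only: inject one fabricated block into name node 0's
  // block pool on data node 0; the helper then schedules a block report.
  static void injectOneBlock(MiniDFSCluster cluster) throws IOException {
    // Block(blockId, numBytes, generationStamp) -- made-up values.
    Iterable<Block> blocks = Arrays.asList(new Block(1L, 512L, 1000L));
    cluster.injectBlocks(0, 0, blocks);
  }
}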
Example #4
Source File: TestWriteToReplica.java From hadoop with Apache License 2.0
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
Example #5
Source File: TestFavoredNodesEndToEnd.java From hadoop with Apache License 2.0
private InetSocketAddress getArbitraryLocalHostAddr()
    throws UnknownHostException {
  Random rand = new Random(System.currentTimeMillis());
  int port = rand.nextInt(65535);
  while (true) {
    boolean conflict = false;
    for (DataNode d : datanodes) {
      if (d.getXferAddress().getPort() == port) {
        port = rand.nextInt(65535);
        conflict = true;
      }
    }
    if (!conflict) {
      break;
    }
  }
  return new InetSocketAddress(InetAddress.getLocalHost(), port);
}
Example #6
Source File: DFSOutputStream.java From RDFS with Apache License 2.0
/**
 * create a heartbeat packet
 */
Packet() {
  this.lastPacketInBlock = false;
  this.numChunks = 0;
  this.offsetInBlock = 0;
  this.seqno = HEART_BEAT_SEQNO;

  buffer = null;
  int packetSize = DataNode.PKT_HEADER_LEN + DFSClient.SIZE_OF_INTEGER;
  buf = new byte[packetSize];

  checksumStart = dataStart = packetSize;
  checksumPos = checksumStart;
  dataPos = dataStart;
  maxChunks = 0;
}
Example #7
Source File: FSDataset.java From RDFS with Apache License 2.0
/** Find the metadata file for the specified block file.
 * Return the generation stamp from the name of the metafile.
 */
static long getGenerationStampFromFile(String[] listdir, String blockName) {
  for (int j = 0; j < listdir.length; j++) {
    String path = listdir[j];
    if (!path.startsWith(blockName)) {
      continue;
    }
    String[] vals = StringUtils.split(path, '_');
    if (vals.length != 3) {     // blk, blkid, genstamp.meta
      continue;
    }
    String[] str = StringUtils.split(vals[2], '.');
    if (str.length != 2) {
      continue;
    }
    return Long.parseLong(str[0]);
  }
  DataNode.LOG.warn("Block " + blockName + " does not have a metafile!");
  return Block.GRANDFATHER_GENERATION_STAMP;
}
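The file naming scheme being parsed here is blk_<blockId>_<generationStamp>.meta. A dependency-free illustration of the same two-step split, using a made-up file name:

public class GenStampParseDemo {
  public static void main(String[] args) {
    String metaFileName = "blk_123_4567.meta"; // hypothetical metafile name
    // First split on '_' -> ["blk", "123", "4567.meta"].
    String[] vals = metaFileName.split("_");
    // Then split the last piece on '.' to isolate the generation stamp.
    String[] str = vals[2].split("\\.");
    System.out.println(Long.parseLong(str[0])); // prints 4567
  }
}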
Example #8
Source File: FSDataset.java From RDFS with Apache License 2.0
/**
 * Remove a block from disk
 * @param blockFile block file
 * @param metaFile block meta file
 * @param b a block
 * @return true if on-disk files are deleted; false otherwise
 */
private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) {
  if (blockFile == null) {
    DataNode.LOG.warn("No file exists for block: " + b);
    return true;
  }

  if (!blockFile.delete()) {
    DataNode.LOG.warn("Not able to delete the block file: " + blockFile);
    return false;
  } else { // remove the meta file
    if (metaFile != null && !metaFile.delete()) {
      DataNode.LOG.warn("Not able to delete the meta block file: " + metaFile);
      return false;
    }
  }
  return true;
}
Example #9
Source File: TestDecommission.java From big-c with Apache License 2.0
public void testClusterStats(int numNameNodes)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);

  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);

    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);

    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);

    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
Example #10
Source File: TestInterDatanodeProtocol.java From RDFS with Apache License 2.0
public static LocatedBlockWithMetaInfo getLastLocatedBlock(
    ClientProtocol namenode, String src) throws IOException {
  // get block info for the last block
  LocatedBlocksWithMetaInfo locations =
      namenode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
  List<LocatedBlock> blocks = locations.getLocatedBlocks();
  DataNode.LOG.info("blocks.size()=" + blocks.size());
  assertTrue(blocks.size() > 0);

  LocatedBlock blk = blocks.get(blocks.size() - 1);
  return new LocatedBlockWithMetaInfo(blk.getBlock(), blk.getLocations(),
      blk.getStartOffset(), locations.getDataProtocolVersion(),
      locations.getNamespaceID(), locations.getMethodFingerPrint());
}
Example #11
Source File: TestFiPipelines.java From big-c with Apache License 2.0
private static void initLoggers() {
  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
Example #12
Source File: MiniDFSCluster.java From RDFS with Apache License 2.0
/**
 * Wait for the given datanode to heartbeat once.
 */
public void waitForDNHeartbeat(int dnIndex, long timeoutMillis)
    throws IOException, InterruptedException {
  DataNode dn = getDataNodes().get(dnIndex);
  for (int i = 0; i < nameNodes.length; i++) {
    waitForDNHeartbeat(dn, timeoutMillis, i);
  }
}
Example #13
Source File: TestRbwSpaceReservation.java From hadoop with Apache License 2.0
/**
 * Ensure that reserved space is released when the client goes away
 * unexpectedly.
 *
 * The verification is done for each replica in the write pipeline.
 *
 * @throws IOException
 */
@Test(timeout=300000)
public void testSpaceReleasedOnUnexpectedEof()
    throws IOException, InterruptedException, TimeoutException {
  final short replication = 3;
  startCluster(BLOCK_SIZE, replication, -1);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Write 1 byte to the file and kill the writer.
  FSDataOutputStream os = fs.create(file, replication);
  os.write(new byte[1]);
  os.hsync();
  DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());

  // Ensure all space reserved for the replica was released on each
  // DataNode.
  for (DataNode dn : cluster.getDataNodes()) {
    final FsVolumeImpl volume =
        (FsVolumeImpl) dn.getFSDataset().getVolumes().get(0);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return (volume.getReservedForRbw() == 0);
      }
    }, 500, Integer.MAX_VALUE); // Wait until the test times out.
  }
}
Example #14
Source File: TestRbwSpaceReservation.java From hadoop with Apache License 2.0
/**
 * Start an in-process cluster for the test.
 *
 * @param blockSize block size to use for the cluster
 * @param numDatanodes number of datanodes to start
 * @param perVolumeCapacity limit the capacity of each volume to the given
 *                          value. If negative, then don't limit.
 * @throws IOException
 */
private void startCluster(int blockSize, int numDatanodes,
    long perVolumeCapacity) throws IOException {
  initConfig(blockSize);

  cluster = new MiniDFSCluster
      .Builder(conf)
      .storagesPerDatanode(STORAGES_PER_DATANODE)
      .numDataNodes(numDatanodes)
      .build();
  fs = cluster.getFileSystem();
  client = fs.getClient();
  cluster.waitActive();

  if (perVolumeCapacity >= 0) {
    for (DataNode dn : cluster.getDataNodes()) {
      for (FsVolumeSpi volume : dn.getFSDataset().getVolumes()) {
        ((FsVolumeImpl) volume).setCapacityForTesting(perVolumeCapacity);
      }
    }
  }

  if (numDatanodes == 1) {
    List<? extends FsVolumeSpi> volumes =
        cluster.getDataNodes().get(0).getFSDataset().getVolumes();
    assertThat(volumes.size(), is(1));
    singletonVolume = ((FsVolumeImpl) volumes.get(0));
  }
}
Example #15
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
/** @return the datanode having the ipc server listen port */
public DataNode getDataNode(int ipcPort) {
  for (DataNode dn : getDataNodes()) {
    if (dn.ipcServer.getListenerAddress().getPort() == ipcPort) {
      return dn;
    }
  }
  return null;
}
Example #16
Source File: TestDatanodeReport.java From hadoop with Apache License 2.0
static DataNode findDatanode(String id, List<DataNode> datanodes) {
  for (DataNode d : datanodes) {
    if (d.getDatanodeUuid().equals(id)) {
      return d;
    }
  }
  throw new IllegalStateException("Datanode " + id
      + " not in datanode list: " + datanodes);
}
Example #17
Source File: SnapshotTestHelper.java From hadoop with Apache License 2.0
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for (String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }

  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
Example #18
Source File: MiniDFSCluster.java From RDFS with Apache License 2.0
public void shutdownDataNode(int index, boolean remove) {
  System.out.println("Shutting down DataNode " + index);
  DataNode dn = remove ? dataNodes.remove(index).datanode
                       : dataNodes.get(index).datanode;
  dn.shutdown();
  numDataNodes--;
}
Example #19
Source File: MiniDFSCluster.java From big-c with Apache License 2.0
public synchronized DataNodeProperties stopDataNode(String dnName) {
  int node = -1;
  for (int i = 0; i < dataNodes.size(); i++) {
    DataNode dn = dataNodes.get(i).datanode;
    LOG.info("DN name=" + dnName + " found DN=" + dn
        + " with name=" + dn.getDisplayName());
    if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
      node = i;
      break;
    }
  }
  return stopDataNode(node);
}
Example #20
Source File: TestHDFSServerPorts.java From hadoop with Apache License 2.0
/**
 * Start the datanode.
 */
public DataNode startDataNode(int index, Configuration config)
    throws IOException {
  File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index);
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath());

  String[] args = new String[] {};
  // DataNode will modify config with the ports it bound to
  return DataNode.createDataNode(args, config);
}
Example #21
Source File: FSDataset.java From RDFS with Apache License 2.0
public void run() {
  DataNode.LOG.info("Start building volume: " + volume);
  try {
    for (Integer namespaceId : namespaceIdDir.keySet()) {
      volume.addNamespace(namespaceId, namespaceIdDir.get(namespaceId),
          conf, supportAppends);
    }
  } catch (IOException ioe) {
    DataNode.LOG.error("Error building volume : " + volume, ioe);
    hasError = true;
  }
  DataNode.LOG.info("Finish building volume for " + volume);
}
Example #22
Source File: MiniDFSCluster.java From hadoop-gpu with Apache License 2.0
/** @return the datanode having the ipc server listen port */
public DataNode getDataNode(int ipcPort) {
  for (DataNode dn : getDataNodes()) {
    if (dn.ipcServer.getListenerAddress().getPort() == ipcPort) {
      return dn;
    }
  }
  return null;
}
Example #23
Source File: DFSClient.java From hadoop-gpu with Apache License 2.0
private void computePacketChunkSize(int psize, int csize) {
  int chunkSize = csize + checksum.getChecksumSize();
  int n = DataNode.PKT_HEADER_LEN + SIZE_OF_INTEGER;
  chunksPerPacket = Math.max((psize - n + chunkSize - 1) / chunkSize, 1);
  packetSize = n + chunkSize * chunksPerPacket;
  if (LOG.isDebugEnabled()) {
    LOG.debug("computePacketChunkSize: src=" + src +
              ", chunkSize=" + chunkSize +
              ", chunksPerPacket=" + chunksPerPacket +
              ", packetSize=" + packetSize);
  }
}
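The packet layout math is easier to see with concrete numbers. The sketch below is a worked example, assuming for illustration a 21-byte packet header and a 4-byte length field (the real values come from DataNode.PKT_HEADER_LEN and SIZE_OF_INTEGER, which are not read here), 512-byte checksum chunks, and 4-byte CRC32 checksums:

public class PacketChunkSizeDemo {
  // Assumed values for illustration only; not taken from the DataNode class.
  static final int PKT_HEADER_LEN = 21;
  static final int SIZE_OF_INTEGER = 4;

  public static void main(String[] args) {
    int psize = 64 * 1024; // requested packet size
    int csize = 512;       // bytes of data covered by one checksum
    int checksumSize = 4;  // CRC32 width

    int chunkSize = csize + checksumSize;     // 516 bytes per chunk on the wire
    int n = PKT_HEADER_LEN + SIZE_OF_INTEGER; // 25 bytes of fixed overhead
    // Ceiling division of (psize - n) by chunkSize; because it rounds up,
    // packetSize can slightly exceed the requested psize.
    int chunksPerPacket = Math.max((psize - n + chunkSize - 1) / chunkSize, 1);
    int packetSize = n + chunkSize * chunksPerPacket;
    // Prints chunksPerPacket=127, packetSize=65557
    System.out.println("chunksPerPacket=" + chunksPerPacket
        + ", packetSize=" + packetSize);
  }
}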
Example #24
Source File: TestStorageMover.java From hadoop with Apache License 2.0
/**
 * Verify block locations after running the migration tool.
 */
void verify(boolean verifyAll) throws Exception {
  for (DataNode dn : cluster.getDataNodes()) {
    DataNodeTestUtils.triggerBlockReport(dn);
  }
  if (verifyAll) {
    verifyNamespace();
  }
}
Example #25
Source File: TestHDFSServerPorts.java From hadoop-gpu with Apache License 2.0
/**
 * Start the data-node.
 */
public DataNode startDataNode(int index, Configuration config)
    throws IOException {
  String dataDir = System.getProperty("test.build.data");
  File dataNodeDir = new File(dataDir, "data-" + index);
  config.set("dfs.data.dir", dataNodeDir.getPath());

  String[] args = new String[] {};
  // DataNode will modify config with the ports it bound to
  return DataNode.createDataNode(args, config);
}
Example #26
Source File: TestAvatarRefreshNamenodes.java From RDFS with Apache License 2.0
@Test
public void testRefreshNamenodes() throws Exception {
  MiniAvatarCluster cluster = null;
  try {
    cluster = new MiniAvatarCluster(conf, 1, true, null, null, 1, true);
    DataNode dn = cluster.getDataNodes().get(0);
    assertEquals(dn.getAllNamespaceServices().length, 1);

    cluster.addNameNode(conf);
    assertEquals(dn.getAllNamespaceServices().length, 2);

    cluster.addNameNode(conf);
    assertEquals(dn.getAllNamespaceServices().length, 3);

    cluster.addNameNode(conf);
    assertEquals(dn.getAllNamespaceServices().length, 4);

    int[] nns = null;
    nns = new int[]{0, 1, 2, 3};
    compareAddress(cluster, dn, nns);

    nns = new int[]{0, 1};
    Configuration conf1 = new Configuration(conf);
    setupAddress(conf1, new int[]{0, 1});
    dn.refreshNamenodes(conf1);
    waitDataNodeInitialized(dn);
    compareAddress(cluster, dn, nns);

    nns = new int[]{0, 2, 3};
    Configuration conf2 = new Configuration(conf);
    setupAddress(conf2, new int[]{0, 2, 3});
    dn.refreshNamenodes(conf2);
    waitDataNodeInitialized(dn);
    compareAddress(cluster, dn, nns);
  } finally {
    cluster.shutDown();
  }
}
Example #27
Source File: FSDataset.java From RDFS with Apache License 2.0
void getBlocksBeingWrittenInfo(LightWeightHashSet<Block> blockSet) {
  if (rbwDir == null) {
    return;
  }

  File[] blockFiles = rbwDir.listFiles();
  if (blockFiles == null) {
    return;
  }

  String[] blockFileNames = getFileNames(blockFiles);
  for (int i = 0; i < blockFiles.length; i++) {
    if (!blockFiles[i].isDirectory()) {
      // get each block in the rbwDir directory
      if (Block.isBlockFilename(blockFileNames[i])) {
        long genStamp = FSDataset.getGenerationStampFromFile(
            blockFileNames, blockFileNames[i]);
        Block block =
            new Block(blockFiles[i], blockFiles[i].length(), genStamp);

        // add this block to block set
        blockSet.add(block);
        if (DataNode.LOG.isDebugEnabled()) {
          DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);
        }
      }
    }
  }
}
Example #28
Source File: TestClientReportBadBlock.java From hadoop with Apache License 2.0
/**
 * Corrupt a block on a data node. Replace the block file content with the
 * bytes 0, 1, ..., BLOCK_SIZE - 1.
 *
 * @param block
 *          the ExtendedBlock to be corrupted
 * @param dn
 *          the data node where the block needs to be corrupted
 * @throws FileNotFoundException
 * @throws IOException
 */
private static void corruptBlock(final ExtendedBlock block, final DataNode dn)
    throws FileNotFoundException, IOException {
  final File f = DataNodeTestUtils.getBlockFile(
      dn, block.getBlockPoolId(), block.getLocalBlock());
  final RandomAccessFile raFile = new RandomAccessFile(f, "rw");
  final byte[] bytes = new byte[(int) BLOCK_SIZE];
  for (int i = 0; i < BLOCK_SIZE; i++) {
    bytes[i] = (byte) (i);
  }
  raFile.write(bytes);
  raFile.close();
}
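A hedged usage sketch for the helper above. It assumes DFSTestUtil.getFirstBlock, the Hadoop test utility that returns a file's first ExtendedBlock; the file path and datanode index are illustrative, and corruptBlock is the private helper shown above, assumed to be in scope:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class CorruptBlockSketch {
  // Illustrative: corrupt the first block of a file on the first DataNode.
  static void corruptFirstBlock(MiniDFSCluster cluster, FileSystem fs)
      throws Exception {
    Path file = new Path("/testCorruptBlock.dat"); // made-up path
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
    DataNode dn = cluster.getDataNodes().get(0);
    corruptBlock(block, dn); // overwrite the replica's on-disk bytes
  }
}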
Example #29
Source File: DFSOutputStream.java From RDFS with Apache License 2.0
private void computePacketChunkSize(int psize, int csize) {
  int chunkSize = csize + checksum.getChecksumSize();
  int n = DataNode.PKT_HEADER_LEN + DFSClient.SIZE_OF_INTEGER;
  chunksPerPacket = Math.max((psize - n + chunkSize - 1) / chunkSize, 1);
  packetSize = n + chunkSize * chunksPerPacket;
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("computePacketChunkSize: src=" + src +
                        ", chunkSize=" + chunkSize +
                        ", chunksPerPacket=" + chunksPerPacket +
                        ", packetSize=" + packetSize);
  }
}
Example #30
Source File: TestIsMethodSupported.java From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  cluster = (new MiniDFSCluster.Builder(conf)).numDataNodes(1).build();
  nnAddress = cluster.getNameNode().getNameNodeAddress();
  DataNode dn = cluster.getDataNodes().get(0);
  dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
                                    dn.getIpcPort());
}