Java Code Examples for org.apache.hadoop.hdfs.server.datanode.StorageLocation#parse()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.StorageLocation#parse(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
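StorageLocation.parse() converts a configured directory string, which may carry an optional storage-type prefix such as [DISK] or [SSD], into a StorageLocation object. Below is a minimal sketch of the call itself; the path is illustrative, the snippet assumes the usual imports (org.apache.hadoop.fs.StorageType, java.net.URI), and the accessors available on the result vary between the Hadoop versions used in the examples (getFile(), getNormalizedUri(), getUri()).

// Minimal usage sketch; the path here is illustrative, not from the examples below.
// parse() may throw IOException for malformed location strings.
StorageLocation location = StorageLocation.parse("[DISK]file:///data/dn1");
StorageType type = location.getStorageType(); // DISK, taken from the optional prefix
URI uri = location.getUri();                  // file:///data/dn1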
Example 1
Source File: TestContainerPersistence.java    From hadoop-ozone with Apache License 2.0
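A JUnit @Before method that parses each configured HDDS datanode directory into a StorageLocation and creates the corresponding directory on disk before each test runs.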
@Before
public void setupPaths() throws IOException {
  containerSet = new ContainerSet();
  volumeSet = new MutableVolumeSet(DATANODE_UUID, conf);
  blockManager = new BlockManagerImpl(conf);
  chunkManager = ChunkManagerFactory.createChunkManager(conf, blockManager);

  for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
    StorageLocation location = StorageLocation.parse(dir);
    FileUtils.forceMkdir(new File(location.getNormalizedUri()));
  }
}
 
Example 2
Source File: TestContainerPersistence.java    From hadoop-ozone with Apache License 2.0
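The matching @After method parses the same configured directories and deletes them, along with the SCM metadata path, after each test.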
@After
public void cleanupDir() throws IOException {
  // Clean up SCM metadata
  log.info("Deleting {}", hddsPath);
  FileUtils.deleteDirectory(new File(hddsPath));

  // Clean up SCM datanode container metadata/data
  for (String dir : conf.getStrings(ScmConfigKeys.HDDS_DATANODE_DIR_KEY)) {
    StorageLocation location = StorageLocation.parse(dir);
    FileUtils.deleteDirectory(new File(location.getNormalizedUri()));
  }
}
 
Example 3
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
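Parses a file URI for each new data directory, registers the resulting StorageLocation with the dataset, and verifies that the volume and storage maps grow to the expected size.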
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Example 4
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
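Adds a volume parsed from a plain path string, then removes it again and checks that the dataset returns to its original volume count.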
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Example 5
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
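Parses the path of a deliberately misbehaving directory and verifies that a failed addVolume() call releases the in-use lock on the underlying storage directory.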
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
    .when(mockVolume).getVolumeMap(
      anyString(),
      any(ReplicaMap.class),
      any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
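    // Expected: the mocked getVolumeMap() failure surfaces as a MultipleIOException.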
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
 
Example 6
Source File: MutableVolumeSet.java    From hadoop-ozone with Apache License 2.0
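Unlike the test fixtures above, this is production code: it parses every configured storage location, builds an HddsVolume for each, and tracks unparseable or failed locations in a separate map.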
/**
 * Add DN volumes configured through ConfigKeys to volumeMap.
 */
private void initializeVolumeSet() throws IOException {
  volumeMap = new ConcurrentHashMap<>();
  failedVolumeMap = new ConcurrentHashMap<>();
  volumeStateMap = new EnumMap<>(StorageType.class);

  Collection<String> rawLocations = getDatanodeStorageDirs(conf);

  for (StorageType storageType : StorageType.values()) {
    volumeStateMap.put(storageType, new ArrayList<>());
  }

  for (String locationString : rawLocations) {
    try {
      StorageLocation location = StorageLocation.parse(locationString);

      HddsVolume hddsVolume = createVolume(location.getUri().getPath(),
          location.getStorageType());

      checkAndSetClusterID(hddsVolume.getClusterID());

      LOG.info("Added Volume : {} to VolumeSet",
          hddsVolume.getHddsRootDir().getPath());

      if (!hddsVolume.getHddsRootDir().mkdirs() &&
          !hddsVolume.getHddsRootDir().exists()) {
        throw new IOException("Failed to create HDDS storage dir " +
            hddsVolume.getHddsRootDir());
      }
      volumeMap.put(hddsVolume.getHddsRootDir().getPath(), hddsVolume);
      volumeStateMap.get(hddsVolume.getStorageType()).add(hddsVolume);
    } catch (IOException e) {
      HddsVolume volume = new HddsVolume.Builder(locationString)
          .failedVolume(true).build();
      failedVolumeMap.put(locationString, volume);
      LOG.error("Failed to parse the storage location: " + locationString, e);
    }
  }

  // Check that at least one volume was added; if every volume failed,
  // volumeMap will be empty, so throw an exception.
  if (volumeMap.size() == 0) {
    throw new DiskOutOfSpaceException("No storage locations configured");
  }

  checkAllVolumes();

  // Ensure volume threads are stopped and SCM disk-usage (df) data is saved during shutdown.
  shutdownHook = () -> {
    saveVolumeSetUsed();
  };
  ShutdownHookManager.get().addShutdownHook(shutdownHook,
      SHUTDOWN_HOOK_PRIORITY);
}