Java Code Examples for org.apache.hadoop.hdfs.server.common.Storage#StorageDirectory

The following examples show how to use org.apache.hadoop.hdfs.server.common.Storage#StorageDirectory. The project and source file that each example was taken from are noted above the example.
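As a quick orientation before the examples (this sketch is not taken from any of the projects below, and the path passed in is purely illustrative), a Storage.StorageDirectory simply wraps a root directory on disk and exposes the standard layout beneath it. Assuming the usual imports (java.io.File and org.apache.hadoop.hdfs.server.common.Storage), a minimal sketch looks like this:

// Minimal sketch: wrap an on-disk root and inspect the standard layout under it.
private static void describeStorageDir(File root) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File current = sd.getCurrentDir();   // <root>/current
  File version = sd.getVersionFile();  // <root>/current/VERSION
  System.out.println("root=" + sd.getRoot()
      + ", current=" + current
      + ", version=" + version);
}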
Example 1
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }

  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
 
Example 2
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
 
Example 3
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Gets initial volume failure information for all volumes that failed
 * immediately at startup.  The method works by determining the set difference
 * between all configured storage locations and the actual storage locations in
 * use after attempting to put all of them into service.
 *
 * @return each storage location that has failed
 */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
    Collection<StorageLocation> dataLocations, DataStorage storage) {
  Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
      dataLocations.size());
  for (StorageLocation sl: dataLocations) {
    failedLocationSet.add(sl.getFile().getAbsolutePath());
  }
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory sd = it.next();
    failedLocationSet.remove(sd.getRoot().getAbsolutePath());
  }
  List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
      failedLocationSet.size());
  long failureDate = Time.now();
  for (String failedStorageLocation: failedLocationSet) {
    volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
        failureDate));
  }
  return volumeFailureInfos;
}
 
Example 4
Source File: TestDataNodeHotSwapVolumes.java    From hadoop with Apache License 2.0
@Test(timeout=60000)
public void testAddBackRemovedVolume()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  // Create some data on every volume.
  createFile(new Path("/test"), 32);

  DataNode dn = cluster.getDataNodes().get(0);
  Configuration conf = dn.getConf();
  String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
  String keepDataDir = oldDataDir.split(",")[0];
  String removeDataDir = oldDataDir.split(",")[1];

  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
  for (int i = 0; i < cluster.getNumNameNodes(); i++) {
    String bpid = cluster.getNamesystem(i).getBlockPoolId();
    BlockPoolSliceStorage bpsStorage =
        dn.getStorage().getBPStorage(bpid);
    // Make sure that there is no block pool level storage under removeDataDir.
    for (int j = 0; j < bpsStorage.getNumStorageDirs(); j++) {
      Storage.StorageDirectory sd = bpsStorage.getStorageDir(j);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
          new File(removeDataDir).getAbsolutePath()
      ));
    }
    assertEquals(dn.getStorage().getBPStorage(bpid).getNumStorageDirs(), 1);
  }

  // Bring the removed directory back. This succeeds only if all metadata about
  // this directory was removed in the previous step.
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
}
 
Example 5
Source File: TestFsDatasetImpl.java    From big-c with Apache License 2.0
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Example 6
Source File: TestFsDatasetImpl.java    From big-c with Apache License 2.0
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Example 7
Source File: DataNode.java    From big-c with Apache License 2.0
/**
 * Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
 * changed volumes.
 * @param newVolumes a comma-separated string that specifies the data volumes.
 * @return changed volumes.
 * @throws IOException if none of the directories are specified in the
 * configuration.
 */
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
  List<StorageLocation> locations = getStorageLocations(conf);

  if (locations.isEmpty()) {
    throw new IOException("No directory is specified.");
  }

  ChangedVolumes results = new ChangedVolumes();
  results.newLocations.addAll(locations);

  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory dir = it.next();
    boolean found = false;
    for (Iterator<StorageLocation> sl = results.newLocations.iterator();
         sl.hasNext(); ) {
      StorageLocation location = sl.next();
      if (location.getFile().getCanonicalPath().equals(
          dir.getRoot().getCanonicalPath())) {
        sl.remove();
        results.unchangedLocations.add(location);
        found = true;
        break;
      }
    }

    if (!found) {
      results.deactivateLocations.add(
          StorageLocation.parse(dir.getRoot().toString()));
    }
  }

  return results;
}
 
Example 8
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testAddBackRemovedVolume()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  startDFSCluster(1, 2);
  // Create some data on every volume.
  createFile(new Path("/test"), 32);

  DataNode dn = cluster.getDataNodes().get(0);
  Configuration conf = dn.getConf();
  String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
  String keepDataDir = oldDataDir.split(",")[0];
  String removeDataDir = oldDataDir.split(",")[1];

  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, keepDataDir);
  for (int i = 0; i < cluster.getNumNameNodes(); i++) {
    String bpid = cluster.getNamesystem(i).getBlockPoolId();
    BlockPoolSliceStorage bpsStorage =
        dn.getStorage().getBPStorage(bpid);
    // Make sure that there is no block pool level storage under removeDataDir.
    for (int j = 0; j < bpsStorage.getNumStorageDirs(); j++) {
      Storage.StorageDirectory sd = bpsStorage.getStorageDir(j);
      assertFalse(sd.getRoot().getAbsolutePath().startsWith(
          new File(removeDataDir).getAbsolutePath()
      ));
    }
    assertEquals(dn.getStorage().getBPStorage(bpid).getNumStorageDirs(), 1);
  }

  // Bring the removed directory back. This succeeds only if all metadata about
  // this directory was removed in the previous step.
  dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
}
 
Example 9
Source File: FsDatasetImpl.java    From big-c with Apache License 2.0
private void addVolume(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd) throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  // If an IOException is thrown by FsVolumeImpl() or getVolumeMap(), nothing
  // needs to be rolled back to keep the various data structures, e.g.,
  // storageMap and asyncDiskService, consistent.
  FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  FsVolumeReference ref = fsVolume.obtainReference();
  ReplicaMap tempVolumeMap = new ReplicaMap(this);
  fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);

  synchronized (this) {
    volumeMap.addAll(tempVolumeMap);
    storageMap.put(sd.getStorageUuid(),
        new DatanodeStorage(sd.getStorageUuid(),
            DatanodeStorage.State.NORMAL,
            storageType));
    asyncDiskService.addVolume(sd.getCurrentDir());
    volumes.addVolume(ref);
  }

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
 
Example 10
Source File: TestDataStorage.java    From hadoop with Apache License 2.0
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File bpRoot = new File(sd.getCurrentDir(), bpid);
  Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
  assertTrue(bpSd.getRoot().isDirectory());
  assertTrue(bpSd.getCurrentDir().isDirectory());
  assertTrue(bpSd.getVersionFile().isFile());
}
 
Example 11
Source File: TestDataStorage.java    From big-c with Apache License 2.0
/** Check whether the root is a valid BlockPoolSlice storage. */
private static void checkDir(File root, String bpid) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  File bpRoot = new File(sd.getCurrentDir(), bpid);
  Storage.StorageDirectory bpSd = new Storage.StorageDirectory(bpRoot);
  assertTrue(bpSd.getRoot().isDirectory());
  assertTrue(bpSd.getCurrentDir().isDirectory());
  assertTrue(bpSd.getVersionFile().isFile());
}
 
Example 12
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(File.class), any(StorageType.class));
  doThrow(new IOException("Failed to getVolumeMap()"))
    .when(mockVolume).getVolumeMap(
      anyString(),
      any(ReplicaMap.class),
      any(RamDiskReplicaLruTracker.class));

  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(badDir.getAbsoluteFile()),
      Matchers.<List<NamespaceInfo>>any()))
      .thenReturn(builder);

  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
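    // Expected path: addVolume() fails here; the file lock release is verified below.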
  }

  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
 
Example 13
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
  final int numExistingVolumes = dataset.getVolumes().size();
  List<NamespaceInfo> nsInfos = new ArrayList<>();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
  StorageLocation loc = StorageLocation.parse(newVolumePath);

  Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
      anyListOf(NamespaceInfo.class)))
      .thenReturn(builder);

  dataset.addVolume(loc, nsInfos);
  assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());

  when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
  when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
  Set<File> volumesToRemove = new HashSet<>();
  volumesToRemove.add(loc.getFile());
  dataset.removeVolumes(volumesToRemove, true);
  assertEquals(numExistingVolumes, dataset.getVolumes().size());
}
 
Example 14
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
@Test
public void testAddVolumes() throws IOException {
  final int numNewVolumes = 3;
  final int numExistingVolumes = dataset.getVolumes().size();
  final int totalVolumes = numNewVolumes + numExistingVolumes;
  Set<String> expectedVolumes = new HashSet<String>();
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  for (int i = 0; i < numNewVolumes; i++) {
    String path = BASE_DIR + "/newData" + i;
    String pathUri = new Path(path).toUri().toString();
    expectedVolumes.add(new File(pathUri).toString());
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder =
        new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
        anyListOf(NamespaceInfo.class)))
        .thenReturn(builder);

    dataset.addVolume(loc, nsInfos);
  }

  assertEquals(totalVolumes, dataset.getVolumes().size());
  assertEquals(totalVolumes, dataset.storageMap.size());

  Set<String> actualVolumes = new HashSet<String>();
  for (int i = 0; i < numNewVolumes; i++) {
    actualVolumes.add(
        dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
  }
  assertEquals(actualVolumes.size(), expectedVolumes.size());
  assertTrue(actualVolumes.containsAll(expectedVolumes));
}
 
Example 15
Source File: DataNode.java    From hadoop with Apache License 2.0
/**
 * Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
 * changed volumes.
 * @param newVolumes a comma-separated string that specifies the data volumes.
 * @return changed volumes.
 * @throws IOException if none of the directories are specified in the
 * configuration.
 */
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
  List<StorageLocation> locations = getStorageLocations(conf);

  if (locations.isEmpty()) {
    throw new IOException("No directory is specified.");
  }

  ChangedVolumes results = new ChangedVolumes();
  results.newLocations.addAll(locations);

  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
       it.hasNext(); ) {
    Storage.StorageDirectory dir = it.next();
    boolean found = false;
    for (Iterator<StorageLocation> sl = results.newLocations.iterator();
         sl.hasNext(); ) {
      StorageLocation location = sl.next();
      if (location.getFile().getCanonicalPath().equals(
          dir.getRoot().getCanonicalPath())) {
        sl.remove();
        results.unchangedLocations.add(location);
        found = true;
        break;
      }
    }

    if (!found) {
      results.deactivateLocations.add(
          StorageLocation.parse(dir.getRoot().toString()));
    }
  }

  return results;
}
 
Example 16
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * Removes a set of volumes from FsDataset.
 * @param volumesToRemove a set of the absolute root paths of the volumes to remove.
 * @param clearFailure set true to clear failure information.
 *
 * DataNode should call this function before calling
 * {@link DataStorage#removeVolumes(java.util.Collection)}.
 */
@Override
public synchronized void removeVolumes(
    Set<File> volumesToRemove, boolean clearFailure) {
  // Make sure that all volumes are absolute paths.
  for (File vol : volumesToRemove) {
    Preconditions.checkArgument(vol.isAbsolute(),
        String.format("%s is not absolute path.", vol.getPath()));
  }
  for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
    Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
    final File absRoot = sd.getRoot().getAbsoluteFile();
    if (volumesToRemove.contains(absRoot)) {
      LOG.info("Removing " + absRoot + " from FsDataset.");

      // Disable the volume from the service.
      asyncDiskService.removeVolume(sd.getCurrentDir());
      volumes.removeVolume(absRoot, clearFailure);

      // Remove all replica information for the blocks on the volume. Unlike
      // updating the volumeMap in addVolume(), this operation does not scan
      // disks.
      for (String bpid : volumeMap.getBlockPoolList()) {
        for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
             it.hasNext(); ) {
          ReplicaInfo block = it.next();
          final File absBasePath =
              new File(block.getVolume().getBasePath()).getAbsoluteFile();
          if (absBasePath.equals(absRoot)) {
            invalidate(bpid, block);
            it.remove();
          }
        }
      }

      storageMap.remove(sd.getStorageUuid());
    }
  }
  setupAsyncLazyPersistThreads();
}
 
Example 17
Source File: TestDataNodeHotSwapVolumes.java    From hadoop with Apache License 2.0
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();

  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Make addVolume() fail.
      newVolume.createNewFile();
    }
  }

  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," +
      Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expect to throw IOException.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String messages[] = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }

  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  for (FsVolumeSpi volume : dataset.getVolumes()) {
    assertThat(volume.getBasePath(), is(not(anyOf(
        is(newDirs.get(0)), is(newDirs.get(2))))));
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }

  // The newly effective conf does not have vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}
 
Example 18
Source File: TestFsDatasetImpl.java    From hadoop with Apache License 2.0
private static Storage.StorageDirectory createStorageDirectory(File root) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  dsForStorageUuid.createStorageID(sd, false);
  return sd;
}
 
Example 19
Source File: TestFsDatasetImpl.java    From big-c with Apache License 2.0
private static Storage.StorageDirectory createStorageDirectory(File root) {
  Storage.StorageDirectory sd = new Storage.StorageDirectory(root);
  dsForStorageUuid.createStorageID(sd, false);
  return sd;
}
 
Example 20
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();

  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Make addVolume() fail.
      newVolume.createNewFile();
    }
  }

  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," +
      Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expect to throw IOException.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String messages[] = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }

  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  for (FsVolumeSpi volume : dataset.getVolumes()) {
    assertThat(volume.getBasePath(), is(not(anyOf(
        is(newDirs.get(0)), is(newDirs.get(2))))));
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }

  // The newly effective conf does not have vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(StorageLocation.parse(ev).getFile().getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}