Java Code Examples for org.apache.hadoop.fs.StorageType#DEFAULT

The following examples show how to use org.apache.hadoop.fs.StorageType#DEFAULT. They are drawn from the open-source projects named above each example.
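
Before the examples, a minimal self-contained sketch (not taken from any of the projects below) showing what the constant resolves to: in the StorageType enum, DEFAULT is a static alias for the DISK constant.

import org.apache.hadoop.fs.StorageType;

public class StorageTypeDefaultDemo {
  public static void main(String[] args) {
    // DEFAULT is a static alias for the DISK enum constant.
    System.out.println(StorageType.DEFAULT);                      // prints "DISK"
    System.out.println(StorageType.DEFAULT == StorageType.DISK);  // prints "true"
  }
}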
Example 1
Source File: TestFsVolumeList.java    From hadoop with Apache License 2.0
@Test
public void testGetNextVolumeWithClosedVolume() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
  List<FsVolumeImpl> volumes = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    File curDir = new File(baseDir, "nextvolume-" + i);
    curDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
        conf, StorageType.DEFAULT);
    volume.setCapacityForTesting(1024 * 1024 * 1024);
    volumes.add(volume);
    volumeList.addVolume(volume.obtainReference());
  }

  // Close the second volume.
  volumes.get(1).closeAndWait();
  for (int i = 0; i < 10; i++) {
    try (FsVolumeReference ref =
        volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
      // The second volume is closed, so it must never be chosen.
      assertNotEquals(ref.getVolume(), volumes.get(1));
    }
  }
}
 
Example 2
Source File: StorageLocation.java    From big-c with Apache License 2.0
/**
 * Attempt to parse a storage URI with storage class and URI. The storage
 * class component of the URI is case-insensitive.
 *
 * @param rawLocation Location string of the format [type]uri, where [type] is
 *                    optional.
 * @return A StorageLocation object parsed from the raw location string.
 */
public static StorageLocation parse(String rawLocation)
    throws IOException, SecurityException {
  Matcher matcher = regex.matcher(rawLocation);
  StorageType storageType = StorageType.DEFAULT;
  String location = rawLocation;

  if (matcher.matches()) {
    String classString = matcher.group(1);
    location = matcher.group(2);
    if (!classString.isEmpty()) {
      storageType =
          StorageType.valueOf(StringUtils.toUpperCase(classString));
    }
  }

  return new StorageLocation(storageType, new Path(location).toUri());
}
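
For illustration, a usage sketch (the data-dir paths are made up): this is how dfs.datanode.data.dir entries with an optional [type] prefix are parsed, and an entry without a prefix keeps StorageType.DEFAULT.

static void parseDemo() throws IOException {
  // Explicit storage class prefix, matched case-insensitively.
  StorageLocation ssd = StorageLocation.parse("[SSD]/mnt/ssd0/dfs/data");
  assert ssd.getStorageType() == StorageType.SSD;

  // No [type] prefix: the location keeps StorageType.DEFAULT (alias for DISK).
  StorageLocation plain = StorageLocation.parse("/mnt/disk0/dfs/data");
  assert plain.getStorageType() == StorageType.DEFAULT;
}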
 
Example 3
Source File: TestFsVolumeList.java    From hadoop with Apache License 2.0
@Test
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
  File volDir = new File(baseDir, "volume-0");
  volDir.mkdirs();
  FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
      conf, StorageType.DEFAULT);
  FsVolumeReference ref = volume.obtainReference();
  volumeList.addVolume(ref);
  try {
    ref.close();
    fail("Should throw exception because the reference is closed in "
        + "VolumeList#addVolume().");
  } catch (IllegalStateException e) {
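    // Expected: the reference was already closed inside FsVolumeList#addVolume().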
  }
}
 
Example 4
Source File: TestFsVolumeList.java    From big-c with Apache License 2.0
@Test
public void testCheckDirsWithClosedVolume() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
  List<FsVolumeImpl> volumes = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    File curDir = new File(baseDir, "volume-" + i);
    curDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", curDir,
        conf, StorageType.DEFAULT);
    volumes.add(volume);
    volumeList.addVolume(volume.obtainReference());
  }

  // Close the 2nd volume.
  volumes.get(1).closeAndWait();
  // checkDirs() should ignore the 2nd volume since it is closed.
  volumeList.checkDirs();
}
 
Example 5
Source File: DFSTestUtil.java    From hadoop with Apache License 2.0
public static DatanodeStorageInfo[] createDatanodeStorageInfos(
    int n, String[] racks, String[] hostnames, StorageType[] types) {
  DatanodeStorageInfo[] storages = new DatanodeStorageInfo[n];
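  // Iterate from the back: the ID and IP are built from i (n down to 1),
  // then i is decremented to index the arrays (n-1 down to 0).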
  for(int i = storages.length; i > 0; ) {
    final String storageID = "s" + i;
    final String ip = i + "." + i + "." + i + "." + i;
    i--;
    final String rack = (racks != null && i < racks.length)
        ? racks[i] : "defaultRack";
    final String hostname = (hostnames != null && i < hostnames.length)
        ? hostnames[i] : "host";
    final StorageType type = (types != null && i < types.length) ? types[i]
        : StorageType.DEFAULT;
    storages[i] = createDatanodeStorageInfo(storageID, ip, rack, hostname,
        type);
  }
  return storages;
}
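
A usage sketch (the arguments are illustrative): null racks or hostnames and a types array shorter than n fall back to "defaultRack", "host", and StorageType.DEFAULT respectively.

// Three storages; only index 0 gets an explicit type, while indices 1 and 2
// fall back to StorageType.DEFAULT because the types array is shorter than n.
DatanodeStorageInfo[] infos = DFSTestUtil.createDatanodeStorageInfos(
    3, null, null, new StorageType[] { StorageType.SSD });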
 
Example 6
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
private StorageType getStorageTypeFromLocations(
    Collection<StorageLocation> dataLocations, File dir) {
  for (StorageLocation dataLocation : dataLocations) {
    if (dataLocation.getFile().equals(dir)) {
      return dataLocation.getStorageType();
    }
  }
  return StorageType.DEFAULT;
}
 
Example 7
Source File: TestPBHelper.java    From big-c with Apache License 2.0
@Test
public void testConvertBlockCommand() {
  Block[] blocks = new Block[] { new Block(21), new Block(22) };
  DatanodeInfo[][] dnInfos = new DatanodeInfo[][] { new DatanodeInfo[1],
      new DatanodeInfo[2] };
  dnInfos[0][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][0] = DFSTestUtil.getLocalDatanodeInfo();
  dnInfos[1][1] = DFSTestUtil.getLocalDatanodeInfo();
  String[][] storageIDs = {{"s00"}, {"s10", "s11"}};
  StorageType[][] storageTypes = {{StorageType.DEFAULT},
      {StorageType.DEFAULT, StorageType.DEFAULT}};
  BlockCommand bc = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, "bp1",
      blocks, dnInfos, storageTypes, storageIDs);
  BlockCommandProto bcProto = PBHelper.convert(bc);
  BlockCommand bc2 = PBHelper.convert(bcProto);
  assertEquals(bc.getAction(), bc2.getAction());
  assertEquals(bc.getBlocks().length, bc2.getBlocks().length);
  Block[] blocks2 = bc2.getBlocks();
  for (int i = 0; i < blocks.length; i++) {
    assertEquals(blocks[i], blocks2[i]);
  }
  DatanodeInfo[][] dnInfos2 = bc2.getTargets();
  assertEquals(dnInfos.length, dnInfos2.length);
  for (int i = 0; i < dnInfos.length; i++) {
    DatanodeInfo[] d1 = dnInfos[i];
    DatanodeInfo[] d2 = dnInfos2[i];
    assertEquals(d1.length, d2.length);
    for (int j = 0; j < d1.length; j++) {
      compare(d1[j], d2[j]);
    }
  }
}
 
Example 8
Source File: VolumeInfo.java    From hadoop-ozone with Apache License 2.0
private VolumeInfo(Builder b) throws IOException {

    this.rootDir = b.rootDir;
    File root = new File(this.rootDir);

    boolean succeeded = root.isDirectory() || root.mkdirs();

    if (!succeeded) {
      LOG.error("Unable to create the volume root dir at : {}", root);
      throw new IOException("Unable to create the volume root dir at " + root);
    }

    this.storageType = (b.storageType != null ?
        b.storageType : StorageType.DEFAULT);

    this.configuredCapacity = (b.configuredCapacity != 0 ?
        b.configuredCapacity : -1);

    SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
    if (usageCheckFactory == null) {
      usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
    }
    SpaceUsageCheckParams checkParams =
        usageCheckFactory.paramsFor(root);

    this.usage = new VolumeUsage(checkParams);
  }
 
Example 9
Source File: ExternalVolumeImpl.java    From big-c with Apache License 2.0
@Override
public StorageType getStorageType() {
  return StorageType.DEFAULT;
}
 
Example 10
Source File: TestDirectoryScanner.java    From big-c with Apache License 2.0
@Override
public StorageType getStorageType() {
  return StorageType.DEFAULT;
}
 
Example 11
Source File: TestDatanodeManager.java    From hadoop with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set the first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Example 12
Source File: HddsVolume.java    From hadoop-ozone with Apache License 2.0
public StorageType getStorageType() {
  if (volumeInfo != null) {
    return volumeInfo.getStorageType();
  }
  return StorageType.DEFAULT;
}
 
Example 13
Source File: DatanodeStorage.java    From hadoop with Apache License 2.0
/**
 * Create a storage with {@link State#NORMAL} and {@link StorageType#DEFAULT}.
 */
public DatanodeStorage(String storageID) {
  this(storageID, State.NORMAL, StorageType.DEFAULT);
}
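
For illustration, a one-line usage sketch of this convenience constructor (the storage ID is made up):

// Equivalent to new DatanodeStorage("DS-1", State.NORMAL, StorageType.DEFAULT).
DatanodeStorage storage = new DatanodeStorage("DS-1");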
 
Example 14
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
@Override
void resetSubFields() {
  src = null;
  dsQuota = -1L;
  type = StorageType.DEFAULT;
}
 
Example 15
Source File: SimulatedFSDataset.java    From big-c with Apache License 2.0
SimulatedStorage(long cap, DatanodeStorage.State state) {
  capacity = cap;
  dnStorage = new DatanodeStorage(
      "SimulatedStorage-" + DatanodeStorage.generateUuid(),
      state, StorageType.DEFAULT);
}
 