Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlock#getStorageIDs()

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlock#getStorageIDs(). Each example is taken from an open-source project; the source file and license are noted above the code.
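getStorageIDs() returns one storage ID per replica, index-aligned with the arrays returned by getLocations() and getStorageTypes(). Before the project examples, here is a minimal sketch of listing the storage IDs for every block of a file. It is not taken from the projects below; the class name PrintStorageIDs, the path /some/file, and the assumption that the default file system is HDFS are all illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class PrintStorageIDs {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster (illustrative assumption).
    DistributedFileSystem fs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path path = new Path("/some/file"); // placeholder path
    long len = fs.getFileStatus(path).getLen();
    // Fetch the located blocks covering the whole file.
    LocatedBlocks lbs =
        fs.getClient().getLocatedBlocks(path.toUri().getPath(), 0, len);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      DatanodeInfo[] locs = lb.getLocations();
      // getStorageIDs() and getStorageTypes() are index-aligned with getLocations().
      String[] storageIDs = lb.getStorageIDs();
      StorageType[] storageTypes = lb.getStorageTypes();
      for (int i = 0; i < locs.length; i++) {
        System.out.println(locs[i].getXferAddr()
            + " storageID=" + storageIDs[i]
            + " storageType=" + storageTypes[i]);
      }
    }
  }
}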
Example 1
Source File: PBHelper.java    From hadoop with Apache License 2.0
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
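  // Record each location and mark whether it also holds a cached replica.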
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
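  // Storage IDs are index-aligned with the locations added above.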
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
 
Example 2
Source File: PBHelper.java    From big-c with Apache License 2.0
public static LocatedBlockProto convert(LocatedBlock b) {
  if (b == null) return null;
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
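  // Record each location and mark whether it also holds a cached replica.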
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelper.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
      + " storage-backed locations!");

  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (int i = 0; i < storageTypes.length; ++i) {
      builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
    }
  }
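  // Storage IDs are index-aligned with the locations added above.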
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }

  return builder.setB(PBHelper.convert(b.getBlock()))
      .setBlockToken(PBHelper.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
 
Example 3
Source File: TestDatanodeManager.java    From hadoop with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}
 
Example 4
Source File: TestDatanodeManager.java    From big-c with Apache License 2.0
/**
 * This test creates a LocatedBlock with 5 locations, sorts the locations
 * based on the network topology, and ensures the locations are still aligned
 * with the storage ids and storage types.
 */
@Test
public void testSortLocatedBlocks() throws IOException {
  // create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

  // register 5 datanodes, each with different storage ID and type
  DatanodeInfo[] locs = new DatanodeInfo[5];
  String[] storageIDs = new String[5];
  StorageType[] storageTypes = new StorageType[]{
      StorageType.ARCHIVE,
      StorageType.DEFAULT,
      StorageType.DISK,
      StorageType.RAM_DISK,
      StorageType.SSD
  };
  for(int i = 0; i < 5; i++) {
    // register new datanode
    String uuid = "UUID-"+i;
    String ip = "IP-" + i;
    DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
    Mockito.when(dr.getDatanodeUuid()).thenReturn(uuid);
    Mockito.when(dr.getIpAddr()).thenReturn(ip);
    Mockito.when(dr.getXferAddr()).thenReturn(ip + ":9000");
    Mockito.when(dr.getXferPort()).thenReturn(9000);
    Mockito.when(dr.getSoftwareVersion()).thenReturn("version1");
    dm.registerDatanode(dr);

    // get location and storage information
    locs[i] = dm.getDatanode(uuid);
    storageIDs[i] = "storageID-"+i;
  }

  // set first 2 locations as decommissioned
  locs[0].setDecommissioned();
  locs[1].setDecommissioned();

  // create LocatedBlock with above locations
  ExtendedBlock b = new ExtendedBlock("somePoolID", 1234);
  LocatedBlock block = new LocatedBlock(b, locs, storageIDs, storageTypes);
  List<LocatedBlock> blocks = new ArrayList<>();
  blocks.add(block);

  final String targetIp = locs[4].getIpAddr();

  // sort block locations
  dm.sortLocatedBlocks(targetIp, blocks);

  // check that storage IDs/types are aligned with datanode locs
  DatanodeInfo[] sortedLocs = block.getLocations();
  storageIDs = block.getStorageIDs();
  storageTypes = block.getStorageTypes();
  assertThat(sortedLocs.length, is(5));
  assertThat(storageIDs.length, is(5));
  assertThat(storageTypes.length, is(5));
  for(int i = 0; i < sortedLocs.length; i++) {
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageID(), is(storageIDs[i]));
    assertThat(((DatanodeInfoWithStorage)sortedLocs[i]).getStorageType(), is(storageTypes[i]));
  }

  // Ensure the local node is first.
  assertThat(sortedLocs[0].getIpAddr(), is(targetIp));

  // Ensure the two decommissioned DNs were moved to the end.
  assertThat(sortedLocs[sortedLocs.length-1].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
  assertThat(sortedLocs[sortedLocs.length-2].getAdminState(),
      is(DatanodeInfo.AdminStates.DECOMMISSIONED));
}