Java Code Examples for org.apache.hadoop.hdfs.server.datanode.DataNode#getStorageLocations()

The following examples show how to use org.apache.hadoop.hdfs.server.datanode.DataNode#getStorageLocations(). Each example is taken from an open-source project; its source file and license are noted in the header above it.
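
Before the project examples, here is a minimal, self-contained sketch of the basic call. It assumes a plain Hadoop 2.x classpath; the class name and the data directory paths are hypothetical and only illustrate the accepted syntax.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class GetStorageLocationsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // dfs.datanode.data.dir takes a comma-separated list of directories;
    // an entry may carry an optional storage-type prefix such as [SSD].
    // Both paths below are hypothetical.
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        "/data/dn1,[SSD]/data/dn2");

    // getStorageLocations parses the configured entries into
    // StorageLocation objects (storage type plus directory).
    List<StorageLocation> locations = DataNode.getStorageLocations(conf);
    for (StorageLocation location : locations) {
      System.out.println(location);
    }
  }
}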
Example 1
Source File: FsDatasetImpl.java    From hadoop with Apache License 2.0
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
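
The doc comment above ("the number of volumes required for operation is the total number of volumes minus the number of failed volumes we can tolerate") boils down to two checks. The following standalone sketch replays that arithmetic; it is not Hadoop code, the method name and exception types are hypothetical, and the real constructor throws DiskErrorException in both cases.

// Standalone replay of the constructor's volume-failure validation.
// All three parameters are hypothetical inputs, not values read from
// a live DataNode.
static void validateVolumes(int volsConfigured, int volsFailed,
    int volFailuresTolerated) {
  // The tolerance must be non-negative and must leave at least one
  // usable volume, hence the strict '>=' against volsConfigured.
  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new IllegalArgumentException(
        "Invalid volume failure config value: " + volFailuresTolerated);
  }
  // Startup is refused once more volumes have failed than are tolerated.
  if (volsFailed > volFailuresTolerated) {
    throw new IllegalStateException("Too many failed volumes: "
        + volsFailed + " failed, " + volFailuresTolerated + " tolerated");
  }
}

For example, with volsConfigured = 4 and volFailuresTolerated = 1, the DataNode still comes up with one failed volume but refuses to start with two.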
 
Example 2
Source File: TestDFSAdmin.java    From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testGetReconfigureStatus()
    throws IOException, InterruptedException {
  ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
  datanode.setReconfigurationUtil(ru);

  List<ReconfigurationUtil.PropertyChange> changes =
      new ArrayList<ReconfigurationUtil.PropertyChange>();
  File newDir = new File(cluster.getDataDirectory(), "data_new");
  newDir.mkdirs();
  changes.add(new ReconfigurationUtil.PropertyChange(
      DFS_DATANODE_DATA_DIR_KEY, newDir.toString(),
      datanode.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
  changes.add(new ReconfigurationUtil.PropertyChange(
      "randomKey", "new123", "old456"));
  when(ru.parseChangedProperties(any(Configuration.class),
      any(Configuration.class))).thenReturn(changes);

  final int port = datanode.getIpcPort();
  final String address = "localhost:" + port;

  assertThat(admin.startReconfiguration("datanode", address), is(0));

  List<String> outputs = null;
  int count = 100;
  while (count > 0) {
    outputs = getReconfigureStatus("datanode", address);
    if (!outputs.isEmpty() && outputs.get(0).contains("finished")) {
      break;
    }
    count--;
    Thread.sleep(100);
  }
  assertTrue(count > 0);
  assertThat(outputs.size(), is(8));  // 1 status line + 3 (SUCCESS) + 4 (FAILED)

  List<StorageLocation> locations = DataNode.getStorageLocations(
      datanode.getConf());
  assertThat(locations.size(), is(1));
  assertThat(locations.get(0).getFile(), is(newDir));
  // Verify the directory is appropriately formatted.
  assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());

  int successOffset = outputs.get(1).startsWith("SUCCESS:") ? 1 : 5;
  int failedOffset = outputs.get(1).startsWith("FAILED:") ? 1: 4;
  assertThat(outputs.get(successOffset),
      containsString("Change property " + DFS_DATANODE_DATA_DIR_KEY));
  assertThat(outputs.get(successOffset + 1),
      is(allOf(containsString("From:"), containsString("data1"),
          containsString("data2"))));
  assertThat(outputs.get(successOffset + 2),
      is(not(anyOf(containsString("data1"), containsString("data2")))));
  assertThat(outputs.get(successOffset + 2),
      is(allOf(containsString("To"), containsString("data_new"))));
  assertThat(outputs.get(failedOffset),
      containsString("Change property randomKey"));
  assertThat(outputs.get(failedOffset + 1),
      containsString("From: \"old456\""));
  assertThat(outputs.get(failedOffset + 2),
      containsString("To: \"new123\""));
}
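
The final assertions, which use DataNode#getStorageLocations() to confirm the new directory took effect, also work outside a test harness. The helper below is a hedged sketch: isReconfigured and expectedDir are hypothetical names, and it assumes a Hadoop 2.x DataNode whose dfs.datanode.data.dir has already been changed.

import java.io.File;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class ReconfigureCheck {
  // Returns true when exactly one storage location is configured, it
  // matches expectedDir, and the directory has been formatted (i.e. a
  // 'current' subdirectory exists). Names here are hypothetical.
  static boolean isReconfigured(Configuration conf, File expectedDir) {
    List<StorageLocation> locations = DataNode.getStorageLocations(conf);
    if (locations.size() != 1
        || !expectedDir.equals(locations.get(0).getFile())) {
      return false;
    }
    return new File(expectedDir, Storage.STORAGE_DIR_CURRENT).isDirectory();
  }
}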
 