Java Code Examples for org.apache.hadoop.net.NodeBase

The following examples show how to use org.apache.hadoop.net.NodeBase. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
/**
 * Records the location information of a single file block: the hosts that
 * store a replica and the rack each of those hosts lives on.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means no rack information is available
 */
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // No rack information from the file system: synthesize a dummy topology
  // path for every host, placing each on the default rack.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int idx = 0; idx < hosts.length; idx++) {
      NodeBase dummy = new NodeBase(hosts[idx], NetworkTopology.DEFAULT_RACK);
      topologyPaths[idx] = dummy.toString();
    }
  }

  // Each topology path ends with the host name; keep only the rack part.
  this.racks = new String[topologyPaths.length];
  for (int idx = 0; idx < this.racks.length; idx++) {
    this.racks[idx] = new NodeBase(topologyPaths[idx]).getNetworkLocation();
  }
}
 
Example 2
Source Project: hadoop   Source File: CombineFileInputFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Captures the replica locations of one file block.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means rack information is unavailable
 */
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], 
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it so racks[i] holds only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 3
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tries to choose a random storage from the rack that {@code next} is on;
 * when that rack cannot supply a target, falls back to choosing randomly
 * from the whole cluster ({@link NodeBase#ROOT}).
 *
 * @throws NotEnoughReplicasException if no target can be found anywhere
 */
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      // Log message typo fixed: "ramdomly" -> "randomly".
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing randomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example 4
Source Project: big-c   Source File: CombineFileInputFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Captures the replica locations of one file block.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means rack information is unavailable
 */
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], 
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it so racks[i] holds only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 5
Source Project: big-c   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tries to choose a random storage from the rack that {@code next} is on;
 * when that rack cannot supply a target, falls back to choosing randomly
 * from the whole cluster ({@link NodeBase#ROOT}).
 *
 * @throws NotEnoughReplicasException if no target can be found anywhere
 */
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      // Log message typo fixed: "ramdomly" -> "randomly".
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing randomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example 6
Source Project: sqoop-on-spark   Source File: HdfsPartitioner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Captures the replica locations of one file block.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means rack information is unavailable
 */
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i],
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last
  // component. Strip it so racks[i] holds only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 7
Source Project: RDFS   Source File: CombineFileInputFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Captures the replica locations of one file block.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means rack information is unavailable
 */
OneBlockInfo(Path path, long offset, long len,
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK)).
                                      toString();
    }
  }

  // The topology paths have the host name included as the last
  // component. Strip it so racks[i] holds only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 8
Source Project: RDFS   Source File: TopologyCache.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Resolves {@code name} to its network location via the DNS-to-switch
 * mapping and returns the canonical Node object for it from the shared
 * cluster map, inserting the node first if it is not yet present.
 */
private Node resolveAndGetNode(String name) {
  List <String> rNameList = dnsToSwitchMapping.resolve(Arrays.asList(new String [] {name}));
  String networkLoc = NodeBase.normalize(rNameList.get(0));
  Node node = null;

  // we depend on clusterMap to get a canonical node object
  // we synchronize this section to guarantee that two concurrent
  // insertions into the clusterMap don't happen (resulting in
  // multiple copies of the same node being created and returned)
  synchronized (clusterMap) {
    // Loop until the lookup succeeds: add() followed by getNode() yields
    // the single canonical instance stored in the map.
    while ((node = clusterMap.getNode(networkLoc+"/"+name)) == null) {
      clusterMap.add(new NodeBase(name, networkLoc));
    }
  }

  return node;
}
 
Example 9
Source Project: RDFS   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Chooses <i>localMachine</i> itself as a replica target when possible.
 * Falls back to a node on the same rack, and — when no local machine is
 * given at all — to a random node anywhere in the cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
protected DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  Node oldNode = excludedNodes.put(localMachine, localMachine);
  if (oldNode == null) { // was not in the excluded list
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
Example 10
Source Project: hraven   Source File: CombineFileInputFormat.java    License: Apache License 2.0 6 votes vote down vote up
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], 
                          NetworkTopology.DEFAULT_RACK)).toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 11
Source Project: hadoop-gpu   Source File: CombineFileInputFormat.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Captures the replica locations of one file block.
 *
 * @param path          file this block belongs to
 * @param offset        byte offset of the block within the file
 * @param len           length of the block in bytes
 * @param hosts         hosts storing a replica of this block
 * @param topologyPaths full topology paths (rack + host) per replica; an
 *                      empty array means rack information is unavailable
 */
OneBlockInfo(Path path, long offset, long len, 
             String[] hosts, String[] topologyPaths) {
  this.onepath = path;
  this.offset = offset;
  this.hosts = hosts;
  this.length = len;
  // Either one topology path per host, or none at all.
  assert (hosts.length == topologyPaths.length ||
          topologyPaths.length == 0);

  // if the file system does not have any rack information, then
  // use dummy rack location.
  if (topologyPaths.length == 0) {
    topologyPaths = new String[hosts.length];
    for (int i = 0; i < topologyPaths.length; i++) {
      topologyPaths[i] = (new NodeBase(hosts[i], NetworkTopology.DEFAULT_RACK)).
                                      toString();
    }
  }

  // The topology paths have the host name included as the last 
  // component. Strip it so racks[i] holds only the rack location.
  this.racks = new String[topologyPaths.length];
  for (int i = 0; i < topologyPaths.length; i++) {
    this.racks[i] = (new NodeBase(topologyPaths[i])).getNetworkLocation();
  }
}
 
Example 12
Source Project: hadoop-gpu   Source File: ReplicationTargetChooser.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Chooses <i>localMachine</i> itself as a replica target when possible.
 * Falls back to a node on the same rack, and — when no local machine is
 * given at all — to a random node anywhere in the cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
private DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           List<Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  if (!excludedNodes.contains(localMachine)) {
    excludedNodes.add(localMachine);
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
Example 13
Source Project: hadoop   Source File: RackResolver.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves a host name to a Node carrying its rack location, falling back
 * to {@link NetworkTopology#DEFAULT_RACK} when the DNS-to-switch mapping
 * cannot resolve the host.
 */
private static Node coreResolve(String hostName) {
  List<String> query = new ArrayList<String>(1);
  query.add(hostName);
  List<String> resolved = dnsToSwitchMapping.resolve(query);

  final String rack;
  if (resolved != null && resolved.get(0) != null) {
    rack = resolved.get(0);
    LOG.info("Resolved " + hostName + " to " + rack);
  } else {
    // Resolution failed: fall back to the default rack.
    rack = NetworkTopology.DEFAULT_RACK;
    LOG.info("Couldn't resolve " + hostName + ". Falling back to "
        + NetworkTopology.DEFAULT_RACK);
  }
  return new NodeBase(hostName, rack);
}
 
Example 14
Source Project: hadoop   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Chooses a storage on the same rack as {@code localMachine}, but outside
 * its node group. On failure, retries with the rack of a second replica
 * already chosen; as a last resort, chooses randomly from the whole
 * cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
@Override
protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local rack, but off-nodegroup
  try {
    // First half of the network location is the rack (node-group topology
    // paths are rack/nodegroup/host).
    final String scope = NetworkTopology.getFirstHalf(localMachine.getNetworkLocation());
    return chooseRandom(scope, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // find the second replica
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getRack(newLocal.getNetworkLocation()), excludedNodes,
            blocksize, maxNodesPerRack, results, avoidStaleNodes,
            storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example 15
Source Project: hadoop   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Chooses a storage from the node group of {@code localMachine}. On
 * failure, retries with the node group of a second replica already
 * chosen; as a last resort, chooses randomly from the whole cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
private DatanodeStorageInfo chooseLocalNodeGroup(
    NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local node group
  try {
    return chooseRandom(
        clusterMap.getNodeGroup(localMachine.getNetworkLocation()),
        excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
        storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // retry with the node group of the second replica, if one exists
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getNodeGroup(newLocal.getNetworkLocation()),
            excludedNodes, blocksize, maxNodesPerRack, results,
            avoidStaleNodes, storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example 16
Source Project: big-c   Source File: RackResolver.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves a host name to a Node carrying its rack location, falling back
 * to {@link NetworkTopology#DEFAULT_RACK} when the DNS-to-switch mapping
 * cannot resolve the host.
 */
private static Node coreResolve(String hostName) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(hostName);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = null;
  if (rNameList == null || rNameList.get(0) == null) {
    // Resolution failed: use the default rack instead.
    rName = NetworkTopology.DEFAULT_RACK;
    LOG.info("Couldn't resolve " + hostName + ". Falling back to "
        + NetworkTopology.DEFAULT_RACK);
  } else {
    rName = rNameList.get(0);
    LOG.info("Resolved " + hostName + " to " + rName);
  }
  return new NodeBase(hostName, rName);
}
 
Example 17
Source Project: big-c   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Chooses a storage on the same rack as {@code localMachine}, but outside
 * its node group. On failure, retries with the rack of a second replica
 * already chosen; as a last resort, chooses randomly from the whole
 * cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
@Override
protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local rack, but off-nodegroup
  try {
    // First half of the network location is the rack (node-group topology
    // paths are rack/nodegroup/host).
    final String scope = NetworkTopology.getFirstHalf(localMachine.getNetworkLocation());
    return chooseRandom(scope, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // find the second replica
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getRack(newLocal.getNetworkLocation()), excludedNodes,
            blocksize, maxNodesPerRack, results, avoidStaleNodes,
            storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example 18
Source Project: big-c   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Chooses a storage from the node group of {@code localMachine}. On
 * failure, retries with the node group of a second replica already
 * chosen; as a last resort, chooses randomly from the whole cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
private DatanodeStorageInfo chooseLocalNodeGroup(
    NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local node group
  try {
    return chooseRandom(
        clusterMap.getNodeGroup(localMachine.getNetworkLocation()),
        excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
        storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // retry with the node group of the second replica, if one exists
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getNodeGroup(newLocal.getNetworkLocation()),
            excludedNodes, blocksize, maxNodesPerRack, results,
            avoidStaleNodes, storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example 19
/**
 * Builds the test fixture: an OfferLifecycleManager backed by a NodeStore
 * holding a single scheduler node for a mock RMNode on localhost. The
 * RMNode uses a fresh NodeBase, i.e. the default (root) network location.
 */
@Before
public void setUp() throws Exception {
  NodeStore store = new NodeStore();
  NodeIdProto nodeId = NodeIdProto.newBuilder().setHost("localhost").setPort(8000).build();
  RMNode rmNode = new RMNodeImpl(new NodeIdPBImpl(nodeId), new MockRMContext(), "localhost", 8000, 8070, new NodeBase(),
          new ResourcePBImpl(), "1.0");
  SchedulerNode node = new FiCaSchedulerNode(rmNode, false);
  store.add(node);
  manager = new OfferLifecycleManager(store, new MyriadDriver(new MockSchedulerDriver()));
}
 
Example 20
Source Project: RDFS   Source File: JobTracker.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves {@code name} to its normalized network location via the
 * DNS-to-switch mapping and registers the host in the cluster topology.
 */
public Node resolveAndAddToTopology(String name) {
  List<String> query = new ArrayList<String>(1);
  query.add(name);
  String rawLocation = dnsToSwitchMapping.resolve(query).get(0);
  String networkLoc = NodeBase.normalize(rawLocation);
  return addHostToNodeMapping(name, networkLoc);
}
 
Example 21
Source Project: hadoop-gpu   Source File: JobTracker.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves {@code name} to its normalized network location via the
 * DNS-to-switch mapping and registers the host in the cluster topology.
 */
public Node resolveAndAddToTopology(String name) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(name);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = rNameList.get(0);
  // Normalize to NodeBase's canonical path form before registering.
  String networkLoc = NodeBase.normalize(rName);
  return addHostToNodeMapping(name, networkLoc);
}
 
Example 22
Source Project: hadoop   Source File: DatanodeInfo.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Sets the network location (rack path) of this datanode, normalizing it
 * to NodeBase's canonical form before storing.
 */
public synchronized void setNetworkLocation(String location) {
  this.location = NodeBase.normalize(location);
}
 
Example 23
Source Project: hadoop   Source File: NamenodeWebHdfsMethods.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Chooses the datanode a WebHDFS request should be redirected to.
 * For CREATE, prefers a node near the client; for OPEN, GETFILECHECKSUM
 * and APPEND, prefers a node holding a replica of the relevant block.
 * When no preferred candidate is found, a random node from the whole
 * topology is returned.
 *
 * @param excludeDatanodes comma-separated hosts (optionally host:port) to
 *        exclude from selection; may be null
 * @throws IOException if the file is missing or the offset is out of range
 */
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  
  // Translate the exclude list into datanode descriptors, matching by
  // transfer address when a port is given and by host name otherwise.
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {          
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      // An OPEN offset must fall inside [0, len) unless the file is empty.
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      // For checksum/append, look at the last byte of the file.
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  // Fallback: any node from the whole topology.
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}
 
Example 24
Source Project: hadoop   Source File: NamenodeJspHelper.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Picks a datanode uniformly at random from the whole network topology
 * (scope {@link NodeBase#ROOT}).
 *
 * @return a randomly chosen datanode.
 */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
  return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
      ).getDatanodeManager().getNetworkTopology().chooseRandom(
      NodeBase.ROOT);
}
 
Example 25
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Choose <i>localMachine</i> as the target.
 * if <i>localMachine</i> is not available, 
 * choose a node on the same rack
 *
 * @param fallbackToLocalRack whether to try the local rack when the local
 *        machine itself cannot host a replica; when false, returns null
 * @return the chosen storage, or null when the local machine is unusable
 *         and {@code fallbackToLocalRack} is false
 * @throws NotEnoughReplicasException if no suitable target can be found
 */
protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
    throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
  if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
    DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
    // otherwise try local machine first
    if (excludedNodes.add(localMachine)) { // was not in the excluded list
      // Walk the requested storage types; for each, scan the local node's
      // storages in shuffled order until one is accepted as a good target.
      for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
          .entrySet().iterator(); iter.hasNext(); ) {
        Map.Entry<StorageType, Integer> entry = iter.next();
        for (DatanodeStorageInfo localStorage : DFSUtil.shuffle(
            localDatanode.getStorageInfos())) {
          StorageType type = entry.getKey();
          if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize,
              maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) {
            // One replica of this storage type has been placed; decrement
            // the outstanding count, dropping the entry when it hits zero.
            int num = entry.getValue();
            if (num == 1) {
              iter.remove();
            } else {
              entry.setValue(num - 1);
            }
            return localStorage;
          }
        }
      }
    } 
  }

  if (!fallbackToLocalRack) {
    return null;
  }
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, blocksize,
      maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
 
Example 26
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Choose one node from the rack that <i>localMachine</i> is on.
 * if no such node is available, choose one node from the rack where
 * a second replica is on.
 * if still no such node is available, choose a random node 
 * in the cluster.
 *
 * @throws NotEnoughReplicasException if no suitable target can be found
 * @return the chosen node
 */
protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
                                              Set<Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              List<DatanodeStorageInfo> results,
                                              boolean avoidStaleNodes,
                                              EnumMap<StorageType, Integer> storageTypes)
    throws NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
  final String localRack = localMachine.getNetworkLocation();
    
  try {
    // choose one from the local rack
    return chooseRandom(localRack, excludedNodes,
        blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e) {
    // find the next replica and retry with its rack
    for(DatanodeStorageInfo resultStorage : results) {
      DatanodeDescriptor nextNode = resultStorage.getDatanodeDescriptor();
      if (nextNode != localMachine) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Failed to choose from local rack (location = " + localRack
              + "), retry with the rack of the next replica (location = "
              + nextNode.getNetworkLocation() + ")", e);
        }
        return chooseFromNextRack(nextNode, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    }

    if (LOG.isDebugEnabled()) {
      // Log message typo fixed: "ramdomly" -> "randomly".
      LOG.debug("Failed to choose from local rack (location = " + localRack
          + "); the second replica is not found, retry choosing randomly", e);
    }
    //the second replica is not found, randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example 27
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>.
 * <p>
 * Nodes are drawn at random from {@code clusterMap} until either the
 * requested number of replicas has been placed or the pool of available
 * nodes in the scope is exhausted. Nodes rejected purely for exceeding the
 * DFS-used-percent threshold are remembered in a separate set and skipped
 * on re-draw without being added to {@code excludedNodes}.
 *
 * @param numOfReplicas number of additional replicas still wanted
 * @param scope topology scope to draw from (e.g. a rack path, or
 *        {@code NodeBase.ROOT} for the whole cluster; a leading "~" in the
 *        scope denotes "everything outside this scope" — see the failure
 *        handling at the bottom)
 * @param excludedNodes nodes that must not be chosen; chosen nodes are
 *        added to this set as a side effect
 * @param storageTypes remaining quota per storage type; entries are
 *        decremented / removed as storages are placed (mutated in place)
 * @return the first chosen storage, if there is any; {@code null} when
 *         replicas are still missing but the shortfall is attributable to
 *         the DFS-used-percent filter (see below)
 * @throws NotEnoughReplicasException if the quota cannot be met for any
 *         other reason
 */
protected DatanodeStorageInfo chooseRandom(int numOfReplicas,
                          String scope,
                          Set<Node> excludedNodes,
                          long blocksize,
                          int maxNodesPerRack,
                          List<DatanodeStorageInfo> results,
                          boolean avoidStaleNodes,
                          boolean considerDfsUsedPercent,
                          EnumMap<StorageType, Integer> storageTypes)
                          throws NotEnoughReplicasException {
    
  // Upper bound on how many distinct candidates we can still examine.
  int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
      scope, excludedNodes);
  // Thread-local builder collects a per-attempt trace, emitted only on
  // failure when debug logging is enabled.
  StringBuilder builder = null;
  if (LOG.isDebugEnabled()) {
    builder = debugLoggingBuilder.get();
    builder.setLength(0);
    builder.append("[");
  }

  // Nodes rejected only because their DFS-used percentage is too high.
  // Kept apart from excludedNodes so the failure path below can tell the
  // two causes apart.
  Set<Node> dfsUsedPercentExcludedNodes = new HashSet<Node>();

  boolean badTarget = false;
  DatanodeStorageInfo firstChosen = null;
  while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
    DatanodeDescriptor chosenNode = 
        (DatanodeDescriptor)clusterMap.chooseRandom(scope);
    if (!dfsUsedPercentExcludedNodes.contains(chosenNode) && !excludedNodes.contains(chosenNode)) { //was not in the excluded list
      if (LOG.isDebugEnabled()) {
        builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" [");
      }
      // Count the node as examined whether or not it ends up usable.
      numOfAvailableNodes--;

      // DFS-used-percent rejection: remember it and re-draw. NOTE(review):
      // such a node can be drawn again by chooseRandom and is then skipped
      // without decrementing numOfAvailableNodes — termination relies on
      // other draws making progress.
      if(!isEnoughDfsUsedPercent(chosenNode, considerDfsUsedPercent)){
        dfsUsedPercentExcludedNodes.add(chosenNode);
        continue;
      }
      excludedNodes.add(chosenNode);

      // Probe the node's storages in random order, looking for one that
      // satisfies any still-outstanding storage type.
      final DatanodeStorageInfo[] storages = DFSUtil.shuffle(
          chosenNode.getStorageInfos());
      int i = 0;
      boolean search = true;
      for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes
          .entrySet().iterator(); search && iter.hasNext(); ) {
        Map.Entry<StorageType, Integer> entry = iter.next();
        for (i = 0; i < storages.length; i++) {
          StorageType type = entry.getKey();
          // addIfIsGoodTarget returns the number of newly excluded nodes
          // (>= 0) on success, negative on rejection.
          final int newExcludedNodes = addIfIsGoodTarget(storages[i],
              excludedNodes, blocksize, maxNodesPerRack, considerLoad, results,
              avoidStaleNodes, type);
          if (newExcludedNodes >= 0) {
            numOfReplicas--;
            if (firstChosen == null) {
              firstChosen = storages[i];
            }
            numOfAvailableNodes -= newExcludedNodes;
            // Consume one unit of this storage type's quota; drop the
            // entry entirely once it reaches zero.
            int num = entry.getValue();
            if (num == 1) {
              iter.remove();
            } else {
              entry.setValue(num - 1);
            }
            // Stop probing this node; at most one storage per draw.
            search = false;
            break;
          }
        }
      }
      if (LOG.isDebugEnabled()) {
        builder.append("\n]");
      }

      // If no candidate storage was found on this DN then set badTarget.
      badTarget = (i == storages.length);
    }
  }
    
  // Shortfall handling: report detail (the debug trace when available).
  if (numOfReplicas>0) {
    String detail = enableDebugLogging;
    if (LOG.isDebugEnabled()) {
      if (badTarget && builder != null) {
        detail = builder.toString();
        builder.setLength(0);
      } else {
        detail = "";
      }
    }
    // When the whole cluster (ROOT) or an exclusion scope ("~...") was
    // searched and some nodes were filtered only by DFS-used percent,
    // return null instead of throwing, letting the caller decide.
    if(dfsUsedPercentExcludedNodes.size() > 0 && (scope.startsWith("~") || scope.equals(NodeBase.ROOT))){
      return null;
    } else {
      throw new NotEnoughReplicasException(detail);
    }
  }
  
  return firstChosen;
}
 
Example 28
Source Project: hadoop   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Chooses a storage on {@code localMachine} itself as the write target.
 * When no storage on the local machine qualifies, falls back (in order) to
 * a node in the same node group and then — only when
 * {@code fallbackToLocalRack} is set — to a node on the same rack.
 *
 * @return the chosen storage, or {@code null} when the node-group fallback
 *         fails and falling back to the local rack is disallowed
 */
@Override
protected DatanodeStorageInfo chooseLocalStorage(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack)
    throws NotEnoughReplicasException {
  // Without a writer location there is nothing "local": pick anywhere.
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes,
        blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // First preference: a storage directly on the writer's own datanode.
  if (localMachine instanceof DatanodeDescriptor) {
    DatanodeDescriptor dn = (DatanodeDescriptor) localMachine;
    // add() returns false if the node was already excluded; probe it once.
    if (excludedNodes.add(localMachine)) {
      Iterator<Map.Entry<StorageType, Integer>> quotaIter =
          storageTypes.entrySet().iterator();
      while (quotaIter.hasNext()) {
        Map.Entry<StorageType, Integer> quota = quotaIter.next();
        StorageType wanted = quota.getKey();
        for (DatanodeStorageInfo storage
            : DFSUtil.shuffle(dn.getStorageInfos())) {
          if (addIfIsGoodTarget(storage, excludedNodes, blocksize,
              maxNodesPerRack, false, results, avoidStaleNodes,
              wanted) >= 0) {
            // Consume one unit of this storage type's remaining quota.
            int remaining = quota.getValue();
            if (remaining == 1) {
              quotaIter.remove();
            } else {
              quota.setValue(remaining - 1);
            }
            return storage;
          }
        }
      }
    }
  }

  // Second preference: some other node in the writer's node group.
  DatanodeStorageInfo nodeGroupChoice = chooseLocalNodeGroup(
      (NetworkTopologyWithNodeGroup) clusterMap, localMachine, excludedNodes,
      blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  if (nodeGroupChoice != null) {
    return nodeGroupChoice;
  }

  // Last resort: the writer's rack, unless the caller disallowed it.
  if (!fallbackToLocalRack) {
    return null;
  }
  return chooseLocalRack(localMachine, excludedNodes,
      blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
}
 
Example 29
Source Project: big-c   Source File: DatanodeInfo.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Sets this node's network location (topology path), normalizing the
 * supplied string with {@link NodeBase#normalize(String)} before storing.
 */
public synchronized void setNetworkLocation(String location) {
  final String normalized = NodeBase.normalize(location);
  this.location = normalized;
}
 
Example 30
Source Project: big-c   Source File: NamenodeWebHdfsMethods.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Chooses a datanode to redirect a WebHDFS operation to.
 * <p>
 * For CREATE, picks a target near the remote client (when the client maps
 * to a known datanode); for OPEN / GETFILECHECKSUM / APPEND, picks the best
 * node holding a replica of the relevant block. In every other case — or
 * when the preferred strategies yield nothing — falls back to a random node
 * from the whole topology.
 *
 * @param excludeDatanodes comma-separated list of "host" or "host:port"
 *        entries that must not be chosen; unknown entries are ignored
 * @throws FileNotFoundException if the path does not exist (replica-based ops)
 * @throws IOException if {@code openOffset} is out of range for OPEN
 */
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
    final String path, final HttpOpParam.Op op, final long openOffset,
    final long blocksize, final String excludeDatanodes) throws IOException {
  final BlockManager bm = namenode.getNamesystem().getBlockManager();

  // Resolve the exclude list into Node objects known to the DatanodeManager.
  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils
        .getTrimmedStringCollection(excludeDatanodes)) {
      final int idx = host.indexOf(":");
      final Node excludeNode;
      if (idx != -1) {
        // "host:port" — match on the exact transfer address.
        excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1)));
      } else {
        excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
      }
      // FIX: both lookups return null for unknown hosts; previously the
      // null was added to the exclude set, which can break code iterating
      // over it. Silently skip entries that do not resolve to a datanode.
      if (excludeNode != null) {
        excludes.add(excludeNode);
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    //choose a datanode near to client 
    final DatanodeDescriptor clientNode = bm.getDatanodeManager(
        ).getDatanodeByHost(getRemoteAddress());
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
          path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN
      || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    //choose a datanode containing a replica 
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      // Offset must lie inside the file (any offset is fine for empty files).
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset
            + " out of the range [0, " + len + "); " + op + ", path=" + path);
      }
    }

    if (len > 0) {
      // For OPEN use the requested offset; for checksum/append probe the
      // last byte so we find a node holding the final block.
      final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  } 

  // Fallback: any node in the cluster topology.
  return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
      ).chooseRandom(NodeBase.ROOT);
}