org.apache.hadoop.net.Node Java Examples

The following examples show how to use org.apache.hadoop.net.Node. Each example is taken from an open-source project; you can go to the original project or source file by following the links above each example.
Example #1
Source File: BlockPlacementPolicyDefault.java    From hadoop with Apache License 2.0
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing ramdomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example #2
Source File: TestDeleteRace.java    From big-c with Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    // Stall the placement decision to widen the race window under test.
    Thread.sleep(3000);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt status
  }
  return results;
}
 
Example #3
Source File: BlockPlacementPolicyDefault.java    From big-c with Apache License 2.0
/**
 * If the given storage is a good target, add it to the result list and
 * update the set of excluded nodes.
 * @return -1 if the given storage is not a good target;
 *         otherwise, the number of nodes added to the excludedNodes set.
 */
int addIfIsGoodTarget(DatanodeStorageInfo storage,
    Set<Node> excludedNodes,
    long blockSize,
    int maxNodesPerRack,
    boolean considerLoad,
    List<DatanodeStorageInfo> results,                           
    boolean avoidStaleNodes,
    StorageType storageType) {
  if (isGoodTarget(storage, blockSize, maxNodesPerRack, considerLoad,
      results, avoidStaleNodes, storageType)) {
    results.add(storage);
    // add node and related nodes to excludedNode
    return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
  } else { 
    return -1;
  }
}
 
Example #4
Source File: NameNodeRpcServer.java    From big-c with Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null)
    metrics.incrAddBlockOps();
  return locatedBlock;
}
 
Example #5
Source File: JobInProgress.java    From RDFS with Apache License 2.0
/**
 * Get the level of locality that a given task would have if launched on
 * a particular TaskTracker. Returns 0 if the task has data on that machine,
 * 1 if it has data on the same rack, and so on, depending on the number
 * of levels in the network hierarchy.
 */
int getLocalityLevel(TaskInProgress tip, TaskTrackerStatus tts) {
  Node tracker = jobtracker.getNode(tts.getHost());
  int level = this.maxLevel;
  // find the right level across split locations
  for (String local : maps[tip.getIdWithinJob()].getSplitLocations()) {
    Node datanode = jobtracker.getNode(local);
    int newLevel = this.maxLevel;
    if (tracker != null && datanode != null) {
      newLevel = getMatchingLevelForNodes(tracker, datanode);
    }
    if (newLevel < level) {
      level = newLevel;
      // an optimization: node-local (level 0) cannot be improved on
      if (level == 0) {
        break;
      }
    }
  }
  return level;
}
 
Example #6
Source File: TestConfigurableBlockPlacement.java    From RDFS with Apache License 2.0
public void testChooseTargetWithDefaultRack() throws Exception {
  VerifiablePolicy policy = initTest();
  TestMapping mapping = (TestMapping) policy.dnsToSwitchMapping;
  mapping.assignDefaultRack = true;
  try {
    policy.hostsUpdated();
    fail("Did not throw : " + DefaultRackException.class);
  } catch (DefaultRackException e) {}

  // Verify there is no default rack.
  RackRingInfo info = policy.racksMap.get(NetworkTopology.DEFAULT_RACK);
  assertNull(info);

  HashMap<Node, Node> emptyMap = new HashMap<Node, Node>();
  List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>();
  for (int i = 0; i < dataNodes.length; i++) {
    policy.chooseTarget(3, dataNodes[i], emptyMap, 512, 4, results, true);
  }
}
 
Example #7
Source File: JobInProgress.java    From RDFS with Apache License 2.0
private void refreshCandidateSpeculativeMaps(long now) {
  if (!hasSpeculativeMaps()) {
    return;
  }
  //////// Populate allTips with all TaskInProgress
  Set<TaskInProgress> allTips = new HashSet<TaskInProgress>();

  // collection of nodes at max level in the cache structure
  Collection<Node> nodesAtMaxLevel = jobtracker.getNodesAtMaxLevel();
  // Add all tasks from max-level nodes breadth-wise
  for (Node parent : nodesAtMaxLevel) {
    Set<TaskInProgress> cache = runningMapCache.get(parent);
    if (cache != null) {
      allTips.addAll(cache);
    }
  }
  // Add all non-local TIPs
  allTips.addAll(nonLocalRunningMaps);

  // update the progress rates of all the candidate tips ..
  for (TaskInProgress tip: allTips) {
    tip.updateProgressRate(now);
  }

  candidateSpeculativeMaps = findSpeculativeTaskCandidates(allTips);
}
 
Example #8
Source File: BlockPlacementPolicyDefault.java    From RDFS with Apache License 2.0
/**
 * All the chosen nodes are on the same rack; choose a node on a new rack
 * for the next replica, according to where the writer is.
 */
private void choose2ndRack(DatanodeDescriptor writer,
    HashMap<Node, Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeDescriptor> results) throws NotEnoughReplicasException {
  if (!clusterMap.isOnSameRack(writer, results.get(0))) {
    DatanodeDescriptor localNode = chooseLocalNode(writer, excludedNodes,
        blocksize, maxNodesPerRack, results);
    if (clusterMap.isOnSameRack(localNode, results.get(0))) {
      // should not put 2nd replica on the same rack as the first replica
      results.remove(localNode); 
    } else {
      return;
    }
  }
  chooseRemoteRack(1, results.get(0), excludedNodes, 
      blocksize, maxNodesPerRack, results);
}
 
Example #9
Source File: BlockPlacementPolicyDefault.java    From RDFS with Apache License 2.0
protected DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  Node oldNode = excludedNodes.put(localMachine, localMachine);
  if (oldNode == null) { // was not in the excluded list
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
Example #10
Source File: NodeSnapshot.java    From RDFS with Apache License 2.0
/**
 * Remove a node from the snapshot.
 * @param node The node.
 */
public void removeNode(ClusterNode node) {
  String host = node.getHost();
  NodeContainer container = hostToRunnableNode.get(host);
  if (container != null) {
    if (container.removeNode(node)) {
      runnableNodeCount--;
    }
    if (container.isEmpty()) {
      hostToRunnableNode.remove(host);
    }
  }
  Node rack = topologyCache.getNode(host).getParent();
  container = rackToRunnableNode.get(rack);
  if (container != null) {
    container.removeNode(node);
    if (container.isEmpty()) {
      rackToRunnableNode.remove(rack);
    }
  }
}
 
Example #11
Source File: NameNode.java    From RDFS with Apache License 2.0
private LocatedBlock addBlock(String src, String clientName,
    DatanodeInfo[] excludedNodes, DatanodeInfo[] favoredNodes,
    long startPos, Block lastBlock, BlockMetaInfoType type)
  throws IOException {
  List<Node> excludedNodeList = null;   
  if (excludedNodes != null) {
    // We must copy here, since this list gets modified later on
    // in ReplicationTargetChooser
    excludedNodeList = new ArrayList<Node>(
      Arrays.<Node>asList(excludedNodes));
  }
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
                         +src+" for "+clientName);
  }
  List<DatanodeInfo> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName,
      excludedNodeList, favoredNodesList, startPos, lastBlock, type);
  if (locatedBlock != null)
    myMetrics.numAddBlockOps.inc();
  return locatedBlock;
}
 
Example #12
Source File: ReplicationTargetChooser.java    From hadoop-gpu with Apache License 2.0
private DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           List<Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  if (!excludedNodes.contains(localMachine)) {
    excludedNodes.add(localMachine);
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
Example #13
Source File: BlockPlacementPolicyWithNodeGroup.java    From big-c with Apache License 2.0
/**
 * Find other nodes in the same nodegroup as <i>chosenNode</i> and add them
 * to <i>excludedNodes</i>, since replicas should not be duplicated on nodes
 * within the same nodegroup.
 * @return number of new excluded nodes
 */
@Override
protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
    Set<Node> excludedNodes) {
  int countOfExcludedNodes = 0;
  String nodeGroupScope = chosenNode.getNetworkLocation();
  List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
  for (Node leafNode : leafNodes) {
    if (excludedNodes.add(leafNode)) {
      // not an existing node in excludedNodes
      countOfExcludedNodes++;
    }
  }
  
  countOfExcludedNodes += addDependentNodesToExcludedNodes(
      chosenNode, excludedNodes);
  return countOfExcludedNodes;
}
 
Example #14
Source File: SimulatorJobInProgress.java    From RDFS with Apache License 2.0
private int getClosestLocality(TaskTracker taskTracker, RawSplit split) {
  int locality = 2;

  Node taskTrackerNode = jobtracker
      .getNode(taskTracker.getStatus().getHost());
  if (taskTrackerNode == null) {
    throw new IllegalArgumentException(
        "Cannot determine network topology node for TaskTracker "
            + taskTracker.getTrackerName());
  }
  for (String location : split.getLocations()) {
    Node dataNode = jobtracker.getNode(location);
    if (dataNode == null) {
      throw new IllegalArgumentException(
          "Cannot determine network topology node for split location "
              + location);
    }
    locality = Math.min(locality, jobtracker.clusterMap.getDistance(
        taskTrackerNode, dataNode));
  }
  return locality;
}
 
Example #15
Source File: BlockPlacementPolicyDefault.java    From RDFS with Apache License 2.0
protected void chooseRemoteRack(int numOfReplicas,
                              DatanodeDescriptor localMachine,
                              HashMap<Node, Node> excludedNodes,
                              long blocksize,
                              int maxReplicasPerRack,
                              List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();
  // randomly choose one node from remote racks
  try {
    chooseRandom(numOfReplicas, "~"+localMachine.getNetworkLocation(),
                 excludedNodes, blocksize, maxReplicasPerRack, results);
  } catch (NotEnoughReplicasException e) {
    chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
                 localMachine.getNetworkLocation(), excludedNodes, blocksize, 
                 maxReplicasPerRack, results);
  }
}
 
Example #16
Source File: ClusterNode.java    From RDFS with Apache License 2.0
public ClusterNode(
    ClusterNodeInfo clusterNodeInfo, Node node,
    Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning) {
  clusterNodeInfo.address.host = clusterNodeInfo.address.host.intern();
  this.clusterNodeInfo = clusterNodeInfo;
  this.freeSpecs = clusterNodeInfo.getFree();
  lastHeartbeatTime = ClusterManager.clock.getTime();
  this.hostNode = node;
  initResourceTypeToCpu(cpuToResourcePartitioning);
}
 
Example #17
Source File: NamenodeWebHdfsMethods.java    From big-c with Apache License 2.0
/**
 * Choose the datanode to redirect the request to. Note that the nodes have
 * been sorted based on availability and network distance, so it is
 * sufficient to return the first eligible node here.
 */
private static DatanodeInfo bestNode(DatanodeInfo[] nodes,
    HashSet<Node> excludes) throws IOException {
  for (DatanodeInfo dn: nodes) {
    if (!dn.isDecommissioned() && !excludes.contains(dn)) {
      return dn;
    }
  }
  throw new IOException("No active nodes contain this block");
}
 
Example #18
Source File: BlockPlacementPolicyWithNodeGroup.java    From hadoop with Apache License 2.0
private DatanodeStorageInfo chooseLocalNodeGroup(
    NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local node group
  try {
    return chooseRandom(
        clusterMap.getNodeGroup(localMachine.getNetworkLocation()),
        excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
        storageTypes);
  } catch (NotEnoughReplicasException e1) {
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getNodeGroup(newLocal.getNetworkLocation()),
            excludedNodes, blocksize, maxNodesPerRack, results,
            avoidStaleNodes, storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example #19
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
  // Set dataNodes[0] as stale
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], -(staleInterval + 1));
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // We set the datanode[0] as stale, thus should choose datanode[1] since
  // datanode[1] is on the same rack with datanode[0] (writer)
  targets = chooseTarget(1);
  assertEquals(targets.length, 1);
  assertEquals(storages[1], targets[0]);

  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(targets.length, 1);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));
  
  // reset
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], 0);
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Example #20
Source File: BlockPlacementPolicyDefault.java    From big-c with Apache License 2.0
/**
 * Return a pipeline of nodes.
 * The pipeline is formed by finding a shortest path that
 * starts from the writer and traverses all <i>nodes</i>.
 * This is basically a traveling salesman problem; the code below
 * approximates it with a greedy nearest-neighbor pass.
 */
private DatanodeStorageInfo[] getPipeline(Node writer,
    DatanodeStorageInfo[] storages) {
  if (storages.length == 0) {
    return storages;
  }

  synchronized(clusterMap) {
    int index=0;
    if (writer == null || !clusterMap.contains(writer)) {
      writer = storages[0].getDatanodeDescriptor();
    }
    for(; index < storages.length; index++) {
      DatanodeStorageInfo shortestStorage = storages[index];
      int shortestDistance = clusterMap.getDistance(writer,
          shortestStorage.getDatanodeDescriptor());
      int shortestIndex = index;
      for(int i = index + 1; i < storages.length; i++) {
        int currentDistance = clusterMap.getDistance(writer,
            storages[i].getDatanodeDescriptor());
        if (shortestDistance>currentDistance) {
          shortestDistance = currentDistance;
          shortestStorage = storages[i];
          shortestIndex = i;
        }
      }
      //switch position index & shortestIndex
      if (index != shortestIndex) {
        storages[shortestIndex] = storages[index];
        storages[index] = shortestStorage;
      }
      writer = shortestStorage.getDatanodeDescriptor();
    }
  }
  return storages;
}
 
Example #21
Source File: ReplicationTargetChooser.java    From hadoop-gpu with Apache License 2.0
private void chooseRandom(int numOfReplicas,
                          String nodes,
                          List<Node> excludedNodes,
                          long blocksize,
                          int maxNodesPerRack,
                          List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  boolean toContinue = true;
  do {
    DatanodeDescriptor[] selectedNodes = 
      chooseRandom(numOfReplicas, nodes, excludedNodes);
    if (selectedNodes.length < numOfReplicas) {
      toContinue = false;
    }
    for(int i=0; i<selectedNodes.length; i++) {
      DatanodeDescriptor result = selectedNodes[i];
      if (isGoodTarget(result, blocksize, maxNodesPerRack, results)) {
        numOfReplicas--;
        results.add(result);
      }
    } // end of for
  } while (numOfReplicas>0 && toContinue);
    
  if (numOfReplicas>0) {
    throw new NotEnoughReplicasException(
                                         "Not able to place enough replicas");
  }
}
 
Example #22
Source File: JobInProgress.java    From RDFS with Apache License 2.0
private int getMatchingLevelForNodes(Node n1, Node n2) {
  int count = 0;
  do {
    if (n1.equals(n2)) {
      return count;
    }
    ++count;
    n1 = n1.getParent();
    n2 = n2.getParent();
  } while (n1 != null && n2 != null);
  return this.maxLevel;
}
 
Example #23
Source File: TestReplicationPolicyWithNodeGroup.java    From hadoop with Apache License 2.0
private DatanodeStorageInfo[] chooseTarget(
    int numOfReplicas,
    DatanodeDescriptor writer,
    List<DatanodeStorageInfo> chosenNodes,
    Set<Node> excludedNodes) {
  return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
      false, excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
}
 
Example #24
Source File: NodeManager.java    From RDFS with Apache License 2.0
/**
 * Return an existing NodeContainer representing the rack or if it
 * does not exist - create a new NodeContainer and return it.
 *
 * @param rack the rack to return the node container for
 * @return the node container representing the rack
 */
private NodeContainer getOrCreateRackRunnableNode(Node rack) {
  NodeContainer nodeContainer = rackToRunnableNodes.get(rack);
  if (nodeContainer == null) {
    nodeContainer = new NodeContainer();
    NodeContainer oldList =
        rackToRunnableNodes.putIfAbsent(rack, nodeContainer);
    if (oldList != null) {
      nodeContainer = oldList;
    }
  }
  return nodeContainer;
}
 
Example #25
Source File: BlockPlacementPolicyWithNodeGroup.java    From hadoop with Apache License 2.0
@Override
protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local rack, but off-nodegroup
  try {
    final String scope = NetworkTopology.getFirstHalf(localMachine.getNetworkLocation());
    return chooseRandom(scope, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch (NotEnoughReplicasException e1) {
    // find the second replica
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getRack(newLocal.getNetworkLocation()), excludedNodes,
            blocksize, maxNodesPerRack, results, avoidStaleNodes,
            storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example #26
Source File: BlockPlacementPolicyFakeData.java    From RDFS with Apache License 2.0
DatanodeDescriptor[] chooseTarget(int numOfReplicas,
                                  DatanodeDescriptor writer,
                                  List<DatanodeDescriptor> chosenNodes,
                                  List<Node> exlcNodes,
                                  long blocksize) {
  
  if (overridingDatanode != null) {
    FSNamesystem.LOG.info("Block Placement: using override target node " + overridingDatanode.getName());
    return new DatanodeDescriptor[] { overridingDatanode };
  }
  
  return super.chooseTarget(numOfReplicas, writer, chosenNodes, excludedNodes, blocksize);
}
 
Example #27
Source File: BlockPlacementPolicyDefault.java    From big-c with Apache License 2.0
/**
 * Randomly choose one target from the given <i>scope</i>.
 * @return the chosen storage, if there is any.
 */
protected DatanodeStorageInfo chooseRandom(String scope,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes)
        throws NotEnoughReplicasException {
  return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack,
      results, avoidStaleNodes, storageTypes);
}
 
Example #28
Source File: JobTracker.java    From RDFS with Apache License 2.0
public Node resolveAndAddToTopology(String name) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(name);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = rNameList.get(0);
  String networkLoc = NodeBase.normalize(rName);
  return addHostToNodeMapping(name, networkLoc);
}
 
Example #29
Source File: JobInProgress.java    From hadoop-gpu with Apache License 2.0
/**
 * Remove a map TIP from the lists for running maps.
 * Called when a map fails/completes (note if a map is killed,
 * it won't be present in the list since it was completed earlier)
 * @param tip the tip that needs to be retired
 */
private synchronized void retireMap(TaskInProgress tip) {
  if (runningMapCache == null) {
    LOG.warn("Running cache for maps missing!! "
             + "Job details are missing.");
    return;
  }
  
  String[] splitLocations = tip.getSplitLocations();

  // Remove the TIP from the list for running non-local maps
  if (splitLocations.length == 0) {
    nonLocalRunningMaps.remove(tip);
    return;
  }

  // Remove from the running map caches
  for(String host: splitLocations) {
    Node node = jobtracker.getNode(host);

    for (int j = 0; j < maxLevel; ++j) {
      Set<TaskInProgress> hostMaps = runningMapCache.get(node);
      if (hostMaps != null) {
        hostMaps.remove(tip);
        if (hostMaps.size() == 0) {
          runningMapCache.remove(node);
        }
      }
      node = node.getParent();
    }
  }
}
 
Example #30
Source File: BlockManager.java    From big-c with Apache License 2.0
/** Choose target for WebHDFS redirection. */
public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src,
    DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
  return blockplacement.chooseTarget(src, 1, clientnode,
      Collections.<DatanodeStorageInfo>emptyList(), false, excludes,
      blocksize, storagePolicySuite.getDefaultPolicy());
}