Java Code Examples for org.apache.hadoop.net.Node

The following examples show how to use org.apache.hadoop.net.Node. They are extracted from open source projects; the source project, file, and license are noted above each example.
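
A quick orientation before the examples: a Node is one element of the cluster's network-topology tree (a host, a rack, or a data center), exposing getName(), getNetworkLocation(), getParent(), and getLevel(). Below is a minimal sketch using the NodeBase implementation; the host and rack names are made up for illustration.

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class NodeBasics {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    Node host = new NodeBase("host1", "/dc1/rack1");
    topology.add(host); // wires up the parent (rack) node and the level

    System.out.println(host.getName());            // host1
    System.out.println(host.getNetworkLocation()); // /dc1/rack1
    System.out.println(NodeBase.getPath(host));    // /dc1/rack1/host1

    // Walk up the tree: host -> rack -> data center -> root.
    for (Node n = host; n != null; n = n.getParent()) {
      System.out.println(n.getLevel() + ": " + NodeBase.getPath(n));
    }
  }
}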
Example 1
Source Project: RDFS   Source File: NodeSnapshot.java    License: Apache License 2.0
/**
 * Remove a node from the snapshot.
 * @param node The node.
 */
public void removeNode(ClusterNode node) {
  String host = node.getHost();
  NodeContainer container = hostToRunnableNode.get(host);
  if (container != null) {
    if (container.removeNode(node)) {
      runnableNodeCount--;
    }
    if (container.isEmpty()) {
      hostToRunnableNode.remove(host);
    }
  }
  Node rack = topologyCache.getNode(host).getParent();
  container = rackToRunnableNode.get(rack);
  if (container != null) {
    container.removeNode(node);
    if (container.isEmpty()) {
      rackToRunnableNode.remove(rack);
    }
  }
}
 
Example 2
Source Project: RDFS   Source File: TestConfigurableBlockPlacement.java    License: Apache License 2.0
public void testChooseTargetWithDefaultRack() throws Exception {
  VerifiablePolicy policy = initTest();
  TestMapping mapping = (TestMapping) policy.dnsToSwitchMapping;
  mapping.assignDefaultRack = true;
  try {
    policy.hostsUpdated();
    fail("Did not throw : " + DefaultRackException.class);
  } catch (DefaultRackException e) {}

  // Verify there is no default rack.
  RackRingInfo info = policy.racksMap.get(NetworkTopology.DEFAULT_RACK);
  assertNull(info);

  HashMap<Node, Node> emptyMap = new HashMap<Node, Node>();
  List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>();
  for (int i = 0; i < dataNodes.length; i++) {
    policy.chooseTarget(3, dataNodes[i], emptyMap, 512, 4, results, true);
  }
}
 
Example 3
Source Project: big-c   Source File: TestDeleteRace.java    License: Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
      numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
      blocksize, storagePolicy);
  try {
    Thread.sleep(3000);
  } catch (InterruptedException e) {}
  return results;
}
 
Example 4
Source Project: RDFS   Source File: JobInProgress.java    License: Apache License 2.0
private void refreshCandidateSpeculativeMaps(long now) {
  if (!hasSpeculativeMaps()) {
    return;
  }
  //////// Populate allTips with all TaskInProgress
  Set<TaskInProgress> allTips = new HashSet<TaskInProgress>();

  // collection of nodes at the max level in the cache structure
  Collection<Node> nodesAtMaxLevel = jobtracker.getNodesAtMaxLevel();
  // Add all tasks from max-level nodes breadth-wise
  for (Node parent : nodesAtMaxLevel) {
    Set<TaskInProgress> cache = runningMapCache.get(parent);
    if (cache != null) {
      allTips.addAll(cache);
    }
  }
  // Add all non-local TIPs
  allTips.addAll(nonLocalRunningMaps);

  // update the progress rates of all the candidate tips
  for (TaskInProgress tip: allTips) {
    tip.updateProgressRate(now);
  }

  candidateSpeculativeMaps = findSpeculativeTaskCandidates(allTips);
}
 
Example 5
Source Project: RDFS   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
protected DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           HashMap<Node, Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  Node oldNode = excludedNodes.put(localMachine, localMachine);
  if (oldNode == null) { // was not in the excluded list
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
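
Note the RDFS-era convention here: excludedNodes is a HashMap<Node, Node> standing in for a set, so put() doubles as the membership test, and a null return means the node was not already excluded. The newer hadoop and big-c examples (6, 8, 13) use a plain Set<Node> instead.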
 
Example 6
Source Project: big-c   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0
/**
 * Find the other nodes in the same nodegroup as <i>chosenNode</i> and add them
 * to <i>excludedNodes</i>, since replicas should not be duplicated on nodes
 * within the same nodegroup.
 * @return number of new excluded nodes
 */
@Override
protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
    Set<Node> excludedNodes) {
  int countOfExcludedNodes = 0;
  String nodeGroupScope = chosenNode.getNetworkLocation();
  List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
  for (Node leafNode : leafNodes) {
    if (excludedNodes.add(leafNode)) {
      // not an existing node in excludedNodes
      countOfExcludedNodes++;
    }
  }
  
  countOfExcludedNodes += addDependentNodesToExcludedNodes(
      chosenNode, excludedNodes);
  return countOfExcludedNodes;
}
 
Example 7
Source Project: hadoop-gpu   Source File: ReplicationTargetChooser.java    License: Apache License 2.0
private DatanodeDescriptor chooseLocalNode(
                                           DatanodeDescriptor localMachine,
                                           List<Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  // if no local machine, randomly choose one node
  if (localMachine == null)
    return chooseRandom(NodeBase.ROOT, excludedNodes, 
                        blocksize, maxNodesPerRack, results);
    
  // otherwise try local machine first
  if (!excludedNodes.contains(localMachine)) {
    excludedNodes.add(localMachine);
    if (isGoodTarget(localMachine, blocksize,
                     maxNodesPerRack, false, results)) {
      results.add(localMachine);
      return localMachine;
    }
  } 
    
  // try a node on local rack
  return chooseLocalRack(localMachine, excludedNodes, 
                         blocksize, maxNodesPerRack, results);
}
 
Example 8
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
private DatanodeStorageInfo chooseFromNextRack(Node next,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
  final String nextRack = next.getNetworkLocation();
  try {
    return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
        results, avoidStaleNodes, storageTypes);
  } catch(NotEnoughReplicasException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to choose from the next rack (location = " + nextRack
          + "), retry choosing ramdomly", e);
    }
    //otherwise randomly choose one from the network
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }
}
 
Example 9
Source Project: RDFS   Source File: NameNode.java    License: Apache License 2.0
private LocatedBlock addBlock(String src, String clientName,
    DatanodeInfo[] excludedNodes, DatanodeInfo[] favoredNodes,
    long startPos, Block lastBlock, BlockMetaInfoType type)
  throws IOException {
  List<Node> excludedNodeList = null;   
  if (excludedNodes != null) {
    // We must copy here, since this list gets modified later on
    // in ReplicationTargetChooser
    excludedNodeList = new ArrayList<Node>(
      Arrays.<Node>asList(excludedNodes));
  }
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
                         +src+" for "+clientName);
  }
  List<DatanodeInfo> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName,
      excludedNodeList, favoredNodesList, startPos, lastBlock, type);
  if (locatedBlock != null)
    myMetrics.numAddBlockOps.inc();
  return locatedBlock;
}
 
Example 10
Source Project: RDFS   Source File: SimulatorJobInProgress.java    License: Apache License 2.0
private int getClosestLocality(TaskTracker taskTracker, RawSplit split) {
  int locality = 2;

  Node taskTrackerNode = jobtracker
      .getNode(taskTracker.getStatus().getHost());
  if (taskTrackerNode == null) {
    throw new IllegalArgumentException(
        "Cannot determine network topology node for TaskTracker "
            + taskTracker.getTrackerName());
  }
  for (String location : split.getLocations()) {
    Node dataNode = jobtracker.getNode(location);
    if (dataNode == null) {
      throw new IllegalArgumentException(
          "Cannot determine network topology node for split location "
              + location);
    }
    locality = Math.min(locality, jobtracker.clusterMap.getDistance(
        taskTrackerNode, dataNode));
  }
  return locality;
}
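
Example 10's locality score comes from NetworkTopology.getDistance(), which sums the hops from each node up to their nearest common ancestor: 0 for the same node, 2 for two hosts on the same rack, and 4 for hosts on different racks (with single-level rack locations). A minimal sketch with made-up names:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class DistanceDemo {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    NodeBase h1 = new NodeBase("h1", "/rack1");
    NodeBase h2 = new NodeBase("h2", "/rack1");
    NodeBase h3 = new NodeBase("h3", "/rack2");
    topology.add(h1);
    topology.add(h2);
    topology.add(h3);

    System.out.println(topology.getDistance(h1, h1)); // 0: same node
    System.out.println(topology.getDistance(h1, h2)); // 2: same rack
    System.out.println(topology.getDistance(h1, h3)); // 4: different racks
  }
}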
 
Example 11
Source Project: RDFS   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
protected void chooseRemoteRack(int numOfReplicas,
                              DatanodeDescriptor localMachine,
                              HashMap<Node, Node> excludedNodes,
                              long blocksize,
                              int maxReplicasPerRack,
                              List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {
  int oldNumOfReplicas = results.size();
  // randomly choose one node from remote racks
  try {
    chooseRandom(numOfReplicas, "~"+localMachine.getNetworkLocation(),
                 excludedNodes, blocksize, maxReplicasPerRack, results);
  } catch (NotEnoughReplicasException e) {
    chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas),
                 localMachine.getNetworkLocation(), excludedNodes, blocksize, 
                 maxReplicasPerRack, results);
  }
}
 
Example 12
Source Project: RDFS   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
/**
 * All the chosen nodes are on the same rack; choose a node on a new rack for
 * the next replica, according to where the writer is.
 */
private void choose2ndRack(DatanodeDescriptor writer,
    HashMap<Node, Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeDescriptor> results) throws NotEnoughReplicasException {
  if (!clusterMap.isOnSameRack(writer, results.get(0))) {
    DatanodeDescriptor localNode = chooseLocalNode(writer, excludedNodes,
        blocksize, maxNodesPerRack, results);
    if (clusterMap.isOnSameRack(localNode, results.get(0))) {
      // should not put 2nd replica on the same rack as the first replica
      results.remove(localNode); 
    } else {
      return;
    }
  }
  chooseRemoteRack(1, results.get(0), excludedNodes, 
      blocksize, maxNodesPerRack, results);
}
 
Example 13
Source Project: big-c   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override
public LocatedBlock addBlock(String src, String clientName,
    ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
    String[] favoredNodes)
    throws IOException {
  checkNNStartup();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
        + " fileId=" + fileId + " for " + clientName);
  }
  Set<Node> excludedNodesSet = null;
  if (excludedNodes != null) {
    excludedNodesSet = new HashSet<Node>(excludedNodes.length);
    for (Node node : excludedNodes) {
      excludedNodesSet.add(node);
    }
  }
  List<String> favoredNodesList = (favoredNodes == null) ? null
      : Arrays.asList(favoredNodes);
  LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
      clientName, previous, excludedNodesSet, favoredNodesList);
  if (locatedBlock != null)
    metrics.incrAddBlockOps();
  return locatedBlock;
}
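
The enhanced-for loop over excludedNodes compiles because DatanodeInfo implements the Node interface, so a DatanodeInfo[] can be consumed anywhere Nodes are expected; Example 20 performs the same conversion for getAdditionalDatanode.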
 
Example 14
Source Project: big-c   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
/**
 * If the given storage is a good target, add it to the result list and
 * update the set of excluded nodes.
 * @return -1 if the given storage is not a good target;
 *         otherwise, return the number of nodes added to excludedNodes set.
 */
int addIfIsGoodTarget(DatanodeStorageInfo storage,
    Set<Node> excludedNodes,
    long blockSize,
    int maxNodesPerRack,
    boolean considerLoad,
    List<DatanodeStorageInfo> results,                           
    boolean avoidStaleNodes,
    StorageType storageType) {
  if (isGoodTarget(storage, blockSize, maxNodesPerRack, considerLoad,
      results, avoidStaleNodes, storageType)) {
    results.add(storage);
    // add node and related nodes to excludedNode
    return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
  } else { 
    return -1;
  }
}
 
Example 15
Source Project: RDFS   Source File: JobInProgress.java    License: Apache License 2.0
/**
 * Get the level of locality that a given task would have if launched on
 * a particular TaskTracker. Returns 0 if the task has data on that machine,
 * 1 if it has data on the same rack, etc (depending on number of levels in
 * the network hierarchy).
 */
int getLocalityLevel(TaskInProgress tip, TaskTrackerStatus tts) {
  Node tracker = jobtracker.getNode(tts.getHost());
  int level = this.maxLevel;
  // find the right level across split locations
  for (String local : maps[tip.getIdWithinJob()].getSplitLocations()) {
    Node datanode = jobtracker.getNode(local);
    int newLevel = this.maxLevel;
    if (tracker != null && datanode != null) {
      newLevel = getMatchingLevelForNodes(tracker, datanode);
    }
    if (newLevel < level) {
      level = newLevel;
      // an optimization
      if (level == 0) {
        break;
      }
    }
  }
  return level;
}
 
Example 16
Source Project: RDFS   Source File: JobTracker.java    License: Apache License 2.0
public Node resolveAndAddToTopology(String name) {
  List <String> tmpList = new ArrayList<String>(1);
  tmpList.add(name);
  List <String> rNameList = dnsToSwitchMapping.resolve(tmpList);
  String rName = rNameList.get(0);
  String networkLoc = NodeBase.normalize(rName);
  return addHostToNodeMapping(name, networkLoc);
}
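
Example 16 funnels every new host through a DNSToSwitchMapping before adding it to the topology. Here is a sketch of a trivial custom mapping that pins every host to a single rack, written against the Hadoop 2.x interface used by the hadoop and big-c examples (the RDFS-era interface declares only resolve()); real clusters would typically use ScriptBasedMapping or TableMapping instead.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.net.DNSToSwitchMapping;

public class SingleRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    // Return one rack path per input name, in the same order.
    List<String> racks = new ArrayList<String>(names.size());
    for (int i = 0; i < names.size(); i++) {
      racks.add("/default-rack");
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() {
    // This mapping caches nothing.
  }

  @Override
  public void reloadCachedMappings(List<String> names) {
    // This mapping caches nothing.
  }
}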
 
Example 17
Source Project: hadoop-gpu   Source File: JobInProgress.java    License: Apache License 2.0
private Map<Node, List<TaskInProgress>> createCache(
                       JobClient.RawSplit[] splits, int maxLevel) {
  Map<Node, List<TaskInProgress>> cache = 
    new IdentityHashMap<Node, List<TaskInProgress>>(maxLevel);
  
  for (int i = 0; i < splits.length; i++) {
    String[] splitLocations = splits[i].getLocations();
    if (splitLocations.length == 0) {
      nonLocalMaps.add(maps[i]);
      continue;
    }

    for(String host: splitLocations) {
      Node node = jobtracker.resolveAndAddToTopology(host);
      LOG.info("tip:" + maps[i].getTIPId() + " has split on node:" + node);
      for (int j = 0; j < maxLevel; j++) {
        List<TaskInProgress> hostMaps = cache.get(node);
        if (hostMaps == null) {
          hostMaps = new ArrayList<TaskInProgress>();
          cache.put(node, hostMaps);
          hostMaps.add(maps[i]);
        }
        //check whether the hostMaps already contains an entry for a TIP
        //This will be true for nodes that are racks and multiple nodes in
        //the rack contain the input for a tip. Note that if it already
        //exists in the hostMaps, it must be the last element there since
        //we process one TIP at a time sequentially in the split-size order
        if (hostMaps.get(hostMaps.size() - 1) != maps[i]) {
          hostMaps.add(maps[i]);
        }
        node = node.getParent();
      }
    }
  }
  return cache;
}
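
An IdentityHashMap works as the cache here because topology Nodes are effectively canonical: resolveAndAddToTopology (Example 16) returns the same Node instance for a given host, and NetworkTopology shares its inner rack nodes, so identity comparison is safe and avoids path-string comparisons.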
 
Example 18
Source Project: big-c   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
      excludedNodes, blocksize, storagePolicy);
}
 
Example 19
Source Project: RDFS   Source File: LocalityStats.java    License: Apache License 2.0
public static int getMatchingLevelForNodes(Node n1, Node n2, int maxLevel) {
  int count = 0;
  do {
    if (n1.equals(n2)) {
      return count;
    }
    ++count;
    n1 = n1.getParent();
    n2 = n2.getParent();
  } while (n1 != null && n2 != null);
  return maxLevel;
}
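
NodeBase does not override equals(), so the n1.equals(n2) test above is reference equality; it still stops at the right level because NetworkTopology shares a single inner Node instance per rack and per data center. A self-contained sketch of the expected values (the helper is copied verbatim from Example 19):

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class MatchingLevelDemo {
  // Copied from Example 19.
  static int getMatchingLevelForNodes(Node n1, Node n2, int maxLevel) {
    int count = 0;
    do {
      if (n1.equals(n2)) {
        return count;
      }
      ++count;
      n1 = n1.getParent();
      n2 = n2.getParent();
    } while (n1 != null && n2 != null);
    return maxLevel;
  }

  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    Node a = new NodeBase("a", "/dc1/rack1");
    Node b = new NodeBase("b", "/dc1/rack1");
    Node c = new NodeBase("c", "/dc1/rack2");
    topology.add(a);
    topology.add(b);
    topology.add(c);

    System.out.println(getMatchingLevelForNodes(a, b, 3)); // 1: same rack
    System.out.println(getMatchingLevelForNodes(a, c, 3)); // 2: same data center
  }
}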
 
Example 20
Source Project: hadoop   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public LocatedBlock getAdditionalDatanode(final String src,
    final long fileId, final ExtendedBlock blk,
    final DatanodeInfo[] existings, final String[] existingStorageIDs,
    final DatanodeInfo[] excludes,
    final int numAdditionalNodes, final String clientName
    ) throws IOException {
  checkNNStartup();
  if (LOG.isDebugEnabled()) {
    LOG.debug("getAdditionalDatanode: src=" + src
        + ", fileId=" + fileId
        + ", blk=" + blk
        + ", existings=" + Arrays.asList(existings)
        + ", excludes=" + Arrays.asList(excludes)
        + ", numAdditionalNodes=" + numAdditionalNodes
        + ", clientName=" + clientName);
  }

  metrics.incrGetAdditionalDatanodeOps();

  Set<Node> excludeSet = null;
  if (excludes != null) {
    excludeSet = new HashSet<Node>(excludes.length);
    for (Node node : excludes) {
      excludeSet.add(node);
    }
  }
  return namesystem.getAdditionalDatanode(src, fileId, blk, existings,
      existingStorageIDs, excludeSet, numAdditionalNodes, clientName);
}
 
Example 21
Source Project: RDFS   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
void chooseRandom(int numOfReplicas,
                  String nodes,
                  HashMap<Node, Node> excludedNodes,
                  long blocksize,
                  int maxNodesPerRack,
                  List<DatanodeDescriptor> results)
  throws NotEnoughReplicasException {

  int numOfAvailableNodes =
    clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
  int numAttempts = numOfAvailableNodes * this.attemptMultiplier;
  while(numOfReplicas > 0 && numOfAvailableNodes > 0 && --numAttempts > 0) {
    DatanodeDescriptor chosenNode = 
      (DatanodeDescriptor)(clusterMap.chooseRandom(nodes));
    Node oldNode = excludedNodes.put(chosenNode, chosenNode);
    if (oldNode == null) {
      numOfAvailableNodes--;

      if (isGoodTarget(chosenNode, blocksize, maxNodesPerRack, results)) {
        numOfReplicas--;
        results.add(chosenNode);
      }
    }
  }
    
  if (numOfReplicas>0) {
    throw new NotEnoughReplicasException(
                                         "Not able to place enough replicas");
  }
}
 
Example 22
Source Project: big-c   Source File: BlockPlacementPolicyWithNodeGroup.java    License: Apache License 2.0
private DatanodeStorageInfo chooseLocalNodeGroup(
    NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
    Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
    List<DatanodeStorageInfo> results, boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes) throws
    NotEnoughReplicasException {
  // no local machine, so choose a random machine
  if (localMachine == null) {
    return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
  }

  // choose one from the local node group
  try {
    return chooseRandom(
        clusterMap.getNodeGroup(localMachine.getNetworkLocation()),
        excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes,
        storageTypes);
  } catch (NotEnoughReplicasException e1) {
    final DatanodeDescriptor newLocal = secondNode(localMachine, results);
    if (newLocal != null) {
      try {
        return chooseRandom(
            clusterMap.getNodeGroup(newLocal.getNetworkLocation()),
            excludedNodes, blocksize, maxNodesPerRack, results,
            avoidStaleNodes, storageTypes);
      } catch(NotEnoughReplicasException e2) {
        //otherwise randomly choose one from the network
        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
      }
    } else {
      //otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }
}
 
Example 23
Source Project: hadoop   Source File: BlockManager.java    License: Apache License 2.0
/** Choose target for WebHDFS redirection. */
public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src,
    DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
  return blockplacement.chooseTarget(src, 1, clientnode,
      Collections.<DatanodeStorageInfo>emptyList(), false, excludes,
      blocksize, storagePolicySuite.getDefaultPolicy());
}
 
Example 24
Source Project: big-c   Source File: BlockManager.java    License: Apache License 2.0
/** Choose target for getting additional datanodes for an existing pipeline. */
public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src,
    int numAdditionalNodes,
    Node clientnode,
    List<DatanodeStorageInfo> chosen,
    Set<Node> excludes,
    long blocksize,
    byte storagePolicyID) {
  
  final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
  return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
      chosen, true, excludes, blocksize, storagePolicy);
}
 
Example 25
Source Project: hadoop   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
@Override
public DatanodeStorageInfo[] chooseTarget(String srcPath,
                                  int numOfReplicas,
                                  Node writer,
                                  List<DatanodeStorageInfo> chosenNodes,
                                  boolean returnChosenNodes,
                                  Set<Node> excludedNodes,
                                  long blocksize,
                                  final BlockStoragePolicy storagePolicy) {
  return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
      excludedNodes, blocksize, storagePolicy);
}
 
Example 26
Source Project: RDFS   Source File: JobInProgress.java    License: Apache License 2.0
private void printCache (Map<Node, List<TaskInProgress>> cache) {
  LOG.info("The taskcache info:");
  for (Map.Entry<Node, List<TaskInProgress>> n : cache.entrySet()) {
    List <TaskInProgress> tips = n.getValue();
    LOG.info("Cached TIPs on node: " + n.getKey());
    for (TaskInProgress tip : tips) {
      LOG.info("tip : " + tip.getTIPId());
    }
  }
}
 
Example 27
Source Project: hadoop-gpu   Source File: JobInProgress.java    License: Apache License 2.0
/**
 * Remove a map TIP from the lists for running maps.
 * Called when a map fails/completes (note if a map is killed,
 * it won't be present in the list since it was completed earlier)
 * @param tip the tip that needs to be retired
 */
private synchronized void retireMap(TaskInProgress tip) {
  if (runningMapCache == null) {
    LOG.warn("Running cache for maps missing!! "
             + "Job details are missing.");
    return;
  }
  
  String[] splitLocations = tip.getSplitLocations();

  // Remove the TIP from the list for running non-local maps
  if (splitLocations.length == 0) {
    nonLocalRunningMaps.remove(tip);
    return;
  }

  // Remove from the running map caches
  for(String host: splitLocations) {
    Node node = jobtracker.getNode(host);

    for (int j = 0; j < maxLevel; ++j) {
      Set<TaskInProgress> hostMaps = runningMapCache.get(node);
      if (hostMaps != null) {
        hostMaps.remove(tip);
        if (hostMaps.size() == 0) {
          runningMapCache.remove(node);
        }
      }
      node = node.getParent();
    }
  }
}
 
Example 28
Source Project: big-c   Source File: BlockPlacementPolicyDefault.java    License: Apache License 2.0
/**
 * Randomly choose one target from the given <i>scope</i>.
 * @return the chosen storage, if there is any.
 */
protected DatanodeStorageInfo chooseRandom(String scope,
    Set<Node> excludedNodes,
    long blocksize,
    int maxNodesPerRack,
    List<DatanodeStorageInfo> results,
    boolean avoidStaleNodes,
    EnumMap<StorageType, Integer> storageTypes)
        throws NotEnoughReplicasException {
  return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack,
      results, avoidStaleNodes, storageTypes);
}
 
Example 29
Source Project: RDFS   Source File: NodeManager.java    License: Apache License 2.0
/**
 * Return an existing NodeContainer representing the rack or if it
 * does not exist - create a new NodeContainer and return it.
 *
 * @param rack the rack to return the node container for
 * @return the node container representing the rack
 */
private NodeContainer getOrCreateRackRunnableNode(Node rack) {
  NodeContainer nodeContainer = rackToRunnableNodes.get(rack);
  if (nodeContainer == null) {
    nodeContainer = new NodeContainer();
    NodeContainer oldList =
        rackToRunnableNodes.putIfAbsent(rack, nodeContainer);
    if (oldList != null) {
      nodeContainer = oldList;
    }
  }
  return nodeContainer;
}
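
The putIfAbsent() call resolves the race where two threads simultaneously miss on the same rack: the loser discards its freshly built NodeContainer and adopts the winner's, so the rack-to-container map stays consistent without explicit locking.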
 
Example 30
Source Project: RDFS   Source File: ClusterNode.java    License: Apache License 2.0
public ClusterNode(
    ClusterNodeInfo clusterNodeInfo, Node node,
    Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning) {
  clusterNodeInfo.address.host = clusterNodeInfo.address.host.intern();
  this.clusterNodeInfo = clusterNodeInfo;
  this.freeSpecs = clusterNodeInfo.getFree();
  lastHeartbeatTime = ClusterManager.clock.getTime();
  this.hostNode = node;
  initResourceTypeToCpu(cpuToResourcePartitioning);
}