org.apache.hadoop.yarn.util.RackResolver Java Examples
The following examples show how to use org.apache.hadoop.yarn.util.RackResolver. They are drawn from several open-source projects; the originating source file, project, and license are noted above each example.
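As a quick orientation before the project examples, the common pattern is to initialize the resolver once with RackResolver.init(Configuration) and then map a host name to its rack via RackResolver.resolve(host).getNetworkLocation(). The sketch below shows that pattern in isolation; the class and host names are made up for illustration, and without a configured topology mapping the resolver simply falls back to the default rack.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.util.RackResolver;

public class RackResolverUsageSketch {
  public static void main(String[] args) {
    // Initialize once; the topology mapping is picked up from the
    // configuration (net.topology.node.switch.mapping.impl and related keys).
    Configuration conf = new Configuration();
    RackResolver.init(conf);

    // Resolve a host to a Node whose network location is the rack path.
    // With no topology configured this returns the default rack.
    Node node = RackResolver.resolve("worker-node-01.example.com");
    System.out.println("Resolved rack: " + node.getNetworkLocation());
  }
}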
Example #1
Source File: YarnTaskSchedulerService.java From tez with Apache License 2.0
HeldContainer(Container container, long nextScheduleTime, long containerExpiryTime,
    CookieContainerRequest firstTaskInfo, ContainerSignatureMatcher signatureMatcher) {
  this.container = container;
  this.nextScheduleTime = nextScheduleTime;
  if (firstTaskInfo != null) {
    this.lastTaskInfo = firstTaskInfo;
    this.lastAssignedContainerSignature = firstTaskInfo.getCookie().getContainerSignature();
  }
  this.localityMatchLevel = LocalityMatchLevel.NODE;
  this.containerExpiryTime = containerExpiryTime;
  this.rack = RackResolver.resolve(container.getNodeId().getHost())
      .getNetworkLocation();
  this.signatureMatcher = signatureMatcher;
}
Example #2
Source File: AMRMClientImpl.java From hadoop with Apache License 2.0
private Set<String> resolveRacks(List<String> nodes) {
  Set<String> racks = new HashSet<String>();
  if (nodes != null) {
    for (String node : nodes) {
      // Ensure node requests are accompanied by requests for
      // corresponding rack
      String rack = RackResolver.resolve(node).getNetworkLocation();
      if (rack == null) {
        LOG.warn("Failed to resolve rack for node " + node + ".");
      } else {
        racks.add(rack);
      }
    }
  }
  return racks;
}
Example #3
Source File: TaskAttemptImpl.java From hadoop with Apache License 2.0
private void computeRackAndLocality() {
  NodeId containerNodeId = container.getNodeId();
  nodeRackName = RackResolver.resolve(
      containerNodeId.getHost()).getNetworkLocation();

  locality = Locality.OFF_SWITCH;
  if (dataLocalHosts.size() > 0) {
    String cHost = resolveHost(containerNodeId.getHost());
    if (dataLocalHosts.contains(cHost)) {
      locality = Locality.NODE_LOCAL;
    }
  }
  if (locality == Locality.OFF_SWITCH) {
    if (dataLocalRacks.contains(nodeRackName)) {
      locality = Locality.RACK_LOCAL;
    }
  }
}
Example #4
Source File: TaskAttemptImpl.java From big-c with Apache License 2.0
private void computeRackAndLocality() {
  NodeId containerNodeId = container.getNodeId();
  nodeRackName = RackResolver.resolve(
      containerNodeId.getHost()).getNetworkLocation();

  locality = Locality.OFF_SWITCH;
  if (dataLocalHosts.size() > 0) {
    String cHost = resolveHost(containerNodeId.getHost());
    if (dataLocalHosts.contains(cHost)) {
      locality = Locality.NODE_LOCAL;
    }
  }
  if (locality == Locality.OFF_SWITCH) {
    if (dataLocalRacks.contains(nodeRackName)) {
      locality = Locality.RACK_LOCAL;
    }
  }
}
Example #5
Source File: TaskAttemptImpl.java From incubator-tez with Apache License 2.0
@SuppressWarnings("rawtypes")
public TaskAttemptImpl(TezTaskID taskId, int attemptNumber, EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener, Configuration conf, Clock clock,
    TaskHeartbeatHandler taskHeartbeatHandler, AppContext appContext,
    boolean isRescheduled, Resource resource, ContainerContext containerContext,
    boolean leafVertex) {
  ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  this.readLock = rwLock.readLock();
  this.writeLock = rwLock.writeLock();
  this.attemptId = TezBuilderUtils.newTaskAttemptId(taskId, attemptNumber);
  this.eventHandler = eventHandler;
  //Reported status
  this.conf = conf;
  this.clock = clock;
  this.taskHeartbeatHandler = taskHeartbeatHandler;
  this.appContext = appContext;
  this.reportedStatus = new TaskAttemptStatus();
  initTaskAttemptStatus(reportedStatus);
  RackResolver.init(conf);
  this.stateMachine = stateMachineFactory.make(this);
  this.isRescheduled = isRescheduled;
  this.taskResource = resource;
  this.containerContext = containerContext;
  this.leafVertex = leafVertex;
}
Example #6
Source File: AMRMClientImpl.java From big-c with Apache License 2.0
private Set<String> resolveRacks(List<String> nodes) {
  Set<String> racks = new HashSet<String>();
  if (nodes != null) {
    for (String node : nodes) {
      // Ensure node requests are accompanied by requests for
      // corresponding rack
      String rack = RackResolver.resolve(node).getNetworkLocation();
      if (rack == null) {
        LOG.warn("Failed to resolve rack for node " + node + ".");
      } else {
        racks.add(rack);
      }
    }
  }
  return racks;
}
Example #7
Source File: GreedyFragmentScheduleAlgorithm.java From incubator-tajo with Apache License 2.0
public String resolve(String host) {
  if (hostRackMap.containsKey(host)) {
    return hostRackMap.get(host);
  } else {
    String rack = RackResolver.resolve(host).getNetworkLocation();
    hostRackMap.put(host, rack);
    return rack;
  }
}
Example #8
Source File: DefaultTaskScheduler.java From tajo with Apache License 2.0
private void addLeafTask(TaskAttemptToSchedulerEvent event) {
  TaskAttempt taskAttempt = event.getTaskAttempt();
  List<DataLocation> locations = taskAttempt.getTask().getDataLocations();

  for (DataLocation location : locations) {
    String host = location.getHost();
    leafTaskHosts.add(host);

    HostVolumeMapping hostVolumeMapping = leafTaskHostMapping.get(host);
    if (hostVolumeMapping == null) {
      String rack = RackResolver.resolve(host).getNetworkLocation();
      hostVolumeMapping = new HostVolumeMapping(host, rack);
      leafTaskHostMapping.put(host, hostVolumeMapping);
    }
    hostVolumeMapping.addTaskAttempt(location.getVolumeId(), taskAttempt);

    if (LOG.isDebugEnabled()) {
      LOG.debug("Added attempt req to host " + host);
    }

    HashSet<TaskAttemptId> list = leafTasksRackMapping.get(hostVolumeMapping.getRack());
    if (list == null) {
      list = new HashSet<>();
      leafTasksRackMapping.put(hostVolumeMapping.getRack(), list);
    }
    list.add(taskAttempt.getId());

    if (LOG.isDebugEnabled()) {
      LOG.debug("Added attempt req to rack " + hostVolumeMapping.getRack());
    }
  }
  leafTasks.add(taskAttempt.getId());
}
Example #9
Source File: ResourceTrackerService.java From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  resourceTrackerAddress = conf.getSocketAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);

  RackResolver.init(conf);

  nextHeartBeatInterval =
      conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
  if (nextHeartBeatInterval <= 0) {
    throw new YarnRuntimeException("Invalid Configuration. "
        + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS
        + " should be larger than 0.");
  }

  minAllocMb = conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
  minAllocVcores = conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
  minAllocGcores = conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_GCORES,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_GCORES);

  minimumNodeManagerVersion = conf.get(
      YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,
      YarnConfiguration.DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION);

  super.serviceInit(conf);
}
Example #10
Source File: DefaultFragmentScheduleAlgorithm.java From incubator-tajo with Apache License 2.0
@Override
public void removeFragment(FragmentPair fragmentPair) {
  boolean removed = false;
  for (String eachHost : fragmentPair.getLeftFragment().getHosts()) {
    String normalizedHost = NetUtils.normalizeHost(eachHost);
    Map<Integer, FragmentsPerDisk> diskFragmentMap = fragmentHostMapping.get(normalizedHost);

    for (Entry<Integer, FragmentsPerDisk> entry : diskFragmentMap.entrySet()) {
      FragmentsPerDisk fragmentsPerDisk = entry.getValue();
      removed = fragmentsPerDisk.removeFragmentPair(fragmentPair);
      if (removed) {
        if (fragmentsPerDisk.size() == 0) {
          diskFragmentMap.remove(entry.getKey());
        }
        if (diskFragmentMap.size() == 0) {
          fragmentHostMapping.remove(normalizedHost);
        }
        break;
      }
    }

    String rack = RackResolver.resolve(normalizedHost).getNetworkLocation();
    if (rackFragmentMapping.containsKey(rack)) {
      Set<FragmentPair> fragmentPairs = rackFragmentMapping.get(rack);
      fragmentPairs.remove(fragmentPair);
      if (fragmentPairs.size() == 0) {
        rackFragmentMapping.remove(rack);
      }
    }
  }

  if (removed) {
    fragmentNum--;
  }
}
Example #11
Source File: DefaultFragmentScheduleAlgorithm.java From incubator-tajo with Apache License 2.0
/**
 * Randomly select a fragment among the fragments stored on nodes of the same rack with the host.
 * @param host
 * @return a randomly selected fragment
 */
@Override
public FragmentPair getRackLocalFragment(String host) {
  String rack = RackResolver.resolve(host).getNetworkLocation();
  if (rackFragmentMapping.containsKey(rack)) {
    Set<FragmentPair> fragmentPairs = rackFragmentMapping.get(rack);
    if (!fragmentPairs.isEmpty()) {
      return fragmentPairs.iterator().next();
    }
  }
  return null;
}
Example #12
Source File: QueryMasterRunner.java From incubator-tajo with Apache License 2.0
@Override
public void init(Configuration conf) {
  this.systemConf = (TajoConf) conf;
  RackResolver.init(systemConf);
  Runtime.getRuntime().addShutdownHook(new Thread(new ShutdownHook()));
  super.init(conf);
}
Example #13
Source File: DefaultTaskScheduler.java From incubator-tajo with Apache License 2.0
private void addLeafTask(QueryUnitAttemptScheduleEvent event) {
  QueryUnitAttempt queryUnitAttempt = event.getQueryUnitAttempt();
  List<DataLocation> locations = queryUnitAttempt.getQueryUnit().getDataLocations();

  for (DataLocation location : locations) {
    String host = location.getHost();

    HostVolumeMapping hostVolumeMapping = leafTaskHostMapping.get(host);
    if (hostVolumeMapping == null) {
      String rack = RackResolver.resolve(host).getNetworkLocation();
      hostVolumeMapping = new HostVolumeMapping(host, rack);
      leafTaskHostMapping.put(host, hostVolumeMapping);
    }
    hostVolumeMapping.addQueryUnitAttempt(location.getVolumeId(), queryUnitAttempt);

    if (LOG.isDebugEnabled()) {
      LOG.debug("Added attempt req to host " + host);
    }

    LinkedList<QueryUnitAttemptId> list = leafTasksRackMapping.get(hostVolumeMapping.getRack());
    if (list == null) {
      list = new LinkedList<QueryUnitAttemptId>();
      leafTasksRackMapping.put(hostVolumeMapping.getRack(), list);
    }
    if (!list.contains(queryUnitAttempt.getId())) {
      list.add(queryUnitAttempt.getId());
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Added attempt req to rack " + hostVolumeMapping.getRack());
    }
  }
  leafTasks.add(queryUnitAttempt.getId());
}
Example #14
Source File: DefaultFragmentScheduleAlgorithm.java From incubator-tajo with Apache License 2.0
private void addFragment(String host, Integer diskId, FragmentPair fragmentPair) {
  // update the fragment maps per host
  String normalizeHost = NetUtils.normalizeHost(host);
  Map<Integer, FragmentsPerDisk> diskFragmentMap;
  if (fragmentHostMapping.containsKey(normalizeHost)) {
    diskFragmentMap = fragmentHostMapping.get(normalizeHost);
  } else {
    diskFragmentMap = new HashMap<Integer, FragmentsPerDisk>();
    fragmentHostMapping.put(normalizeHost, diskFragmentMap);
  }
  FragmentsPerDisk fragmentsPerDisk;
  if (diskFragmentMap.containsKey(diskId)) {
    fragmentsPerDisk = diskFragmentMap.get(diskId);
  } else {
    fragmentsPerDisk = new FragmentsPerDisk(diskId);
    diskFragmentMap.put(diskId, fragmentsPerDisk);
  }
  fragmentsPerDisk.addFragmentPair(fragmentPair);

  // update the fragment maps per rack
  String rack = RackResolver.resolve(normalizeHost).getNetworkLocation();
  Set<FragmentPair> fragmentPairList;
  if (rackFragmentMapping.containsKey(rack)) {
    fragmentPairList = rackFragmentMapping.get(rack);
  } else {
    fragmentPairList = Collections.newSetFromMap(new HashMap<FragmentPair, Boolean>());
    rackFragmentMapping.put(rack, fragmentPairList);
  }
  fragmentPairList.add(fragmentPair);
}
Example #15
Source File: YarnTaskSchedulerService.java From incubator-tez with Apache License 2.0
@Override
public CookieContainerRequest assignNewContainer(Container container) {
  String location = RackResolver.resolve(container.getNodeId().getHost())
      .getNetworkLocation();
  CookieContainerRequest assigned =
      getMatchingRequestWithPriority(container, location);
  doBookKeepingForAssignedContainer(assigned, container, location, false);
  return assigned;
}
Example #16
Source File: YarnTaskSchedulerService.java From incubator-tez with Apache License 2.0
HeldContainer(Container container, long nextScheduleTime,
    long containerExpiryTime, CookieContainerRequest firstTaskInfo) {
  this.container = container;
  this.nextScheduleTime = nextScheduleTime;
  if (firstTaskInfo != null) {
    this.lastTaskInfo = firstTaskInfo;
    this.firstContainerSignature = firstTaskInfo.getCookie().getContainerSignature();
  }
  this.localityMatchLevel = LocalityMatchLevel.NODE;
  this.containerExpiryTime = containerExpiryTime;
  this.rack = RackResolver.resolve(container.getNodeId().getHost())
      .getNetworkLocation();
}
Example #17
Source File: YarnTaskSchedulerService.java From tez with Apache License 2.0
@Override
public CookieContainerRequest assignNewContainer(Container container) {
  String location = RackResolver.resolve(container.getNodeId().getHost())
      .getNetworkLocation();
  CookieContainerRequest assigned =
      getMatchingRequestWithPriority(container, location);
  doBookKeepingForAssignedContainer(assigned, container, location, false);
  return assigned;
}
Example #18
Source File: TestTezAMRMClient.java From tez with Apache License 2.0
@SuppressWarnings("unchecked")
@Before
public void setup() {
  amrmClient = new TezAMRMClientAsync(new AMRMClientImpl(), 1000,
      mock(AMRMClientAsync.CallbackHandler.class));
  RackResolver.init(new Configuration());
}
Example #19
Source File: RMContainerAllocator.java From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  //this.reduceScheduler.serviceInit(conf);
  reduceSlowStart = conf.getFloat(
      MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
      DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
  maxReduceRampupLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT);
  maxReducePreemptionLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
  allocationDelayThresholdMs = conf.getInt(
      MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
      MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
  maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
  maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
  RackResolver.init(conf);
  retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
      MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
  // Init startTime to current time. If all goes well, it will be reset after
  // first attempt to contact RM.
  retrystartTime = System.currentTimeMillis();
}
Example #20
Source File: ResourceTrackerService.java From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  resourceTrackerAddress = conf.getSocketAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
      YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);

  RackResolver.init(conf);

  nextHeartBeatInterval =
      conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS);
  if (nextHeartBeatInterval <= 0) {
    throw new YarnRuntimeException("Invalid Configuration. "
        + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS
        + " should be larger than 0.");
  }

  minAllocMb = conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
  minAllocVcores = conf.getInt(
      YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
      YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);

  minimumNodeManagerVersion = conf.get(
      YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,
      YarnConfiguration.DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION);

  super.serviceInit(conf);
}
Example #21
Source File: RMContainerAllocator.java From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  reduceSlowStart = conf.getFloat(
      MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART,
      DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
  maxReduceRampupLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT);
  maxReducePreemptionLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
  allocationDelayThresholdMs = conf.getInt(
      MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
      MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
  maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
  maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
  RackResolver.init(conf);
  retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
      MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
  mapNodeLabelExpression = conf.get(MRJobConfig.MAP_NODE_LABEL_EXP);
  reduceNodeLabelExpression = conf.get(MRJobConfig.REDUCE_NODE_LABEL_EXP);
  // Init startTime to current time. If all goes well, it will be reset after
  // first attempt to contact RM.
  retrystartTime = System.currentTimeMillis();
}
Example #22
Source File: TestJobHistoryParsing.java From big-c with Apache License 2.0
/**
 * Simple test some methods of JobHistory
 */
@Test(timeout = 20000)
public void testJobHistoryMethods() throws Exception {
  LOG.info("STARTING testJobHistoryMethods");
  try {
    Configuration configuration = new Configuration();
    configuration.setClass(
        NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
        MyResolver.class, DNSToSwitchMapping.class);

    RackResolver.init(configuration);
    MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(), true);
    app.submit(configuration);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    app.waitForState(job, JobState.SUCCEEDED);

    JobHistory jobHistory = new JobHistory();
    jobHistory.init(configuration);
    // Method getAllJobs
    Assert.assertEquals(1, jobHistory.getAllJobs().size());
    // and with ApplicationId
    Assert.assertEquals(1, jobHistory.getAllJobs(app.getAppID()).size());

    JobsInfo jobsinfo = jobHistory.getPartialJobs(0L, 10L, null, "default",
        0L, System.currentTimeMillis() + 1, 0L,
        System.currentTimeMillis() + 1, JobState.SUCCEEDED);

    Assert.assertEquals(1, jobsinfo.getJobs().size());
    Assert.assertNotNull(jobHistory.getApplicationAttemptId());
    // test Application Id
    Assert.assertEquals("application_0_0000",
        jobHistory.getApplicationID().toString());
    Assert.assertEquals("Job History Server", jobHistory.getApplicationName());
    // method does not work
    Assert.assertNull(jobHistory.getEventHandler());
    // method does not work
    Assert.assertNull(jobHistory.getClock());
    // method does not work
    Assert.assertNull(jobHistory.getClusterInfo());
  } finally {
    LOG.info("FINISHED testJobHistoryMethods");
  }
}
Example #23
Source File: DagAwareYarnTaskScheduler.java From tez with Apache License 2.0
HeldContainer(Container container) {
  this.container = container;
  this.rack = RackResolver.resolve(container.getNodeId().getHost()).getNetworkLocation();
}
Example #24
Source File: AMRMClientImpl.java From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  RackResolver.init(conf);
  super.serviceInit(conf);
}
Example #25
Source File: TaskAttemptImpl.java From hadoop with Apache License 2.0
public TaskAttemptImpl(TaskId taskId, int i,
    EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
    JobConf conf, String[] dataLocalHosts,
    Token<JobTokenIdentifier> jobToken,
    Credentials credentials, Clock clock,
    AppContext appContext) {
  oldJobId = TypeConverter.fromYarn(taskId.getJobId());
  this.conf = conf;
  this.clock = clock;
  attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
  attemptId.setTaskId(taskId);
  attemptId.setId(i);
  this.taskAttemptListener = taskAttemptListener;
  this.appContext = appContext;

  // Initialize reportedStatus
  reportedStatus = new TaskAttemptStatus();
  initTaskAttemptStatus(reportedStatus);

  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  readLock = readWriteLock.readLock();
  writeLock = readWriteLock.writeLock();

  this.credentials = credentials;
  this.jobToken = jobToken;
  this.eventHandler = eventHandler;
  this.jobFile = jobFile;
  this.partition = partition;

  //TODO:create the resource reqt for this Task attempt
  this.resourceCapability = recordFactory.newRecordInstance(Resource.class);
  this.resourceCapability.setMemory(
      getMemoryRequired(conf, taskId.getTaskType()));
  this.resourceCapability.setVirtualCores(
      getCpuRequired(conf, taskId.getTaskType()));
  this.resourceCapability.setGpuCores(
      getGpuRequired(conf, taskId.getTaskType()));

  this.dataLocalHosts = resolveHosts(dataLocalHosts);
  RackResolver.init(conf);
  this.dataLocalRacks = new HashSet<String>();
  for (String host : this.dataLocalHosts) {
    this.dataLocalRacks.add(RackResolver.resolve(host).getNetworkLocation());
  }

  locality = Locality.OFF_SWITCH;
  avataar = Avataar.VIRGIN;

  // This "this leak" is okay because the retained pointer is in an
  // instance variable.
  stateMachine = stateMachineFactory.make(this);
}
Example #26
Source File: TaskAttemptImpl.java From tez with Apache License 2.0
@Override
public void transition(TaskAttemptImpl ta, TaskAttemptEvent origEvent) {
  TaskAttemptEventSubmitted event = (TaskAttemptEventSubmitted) origEvent;

  AMContainer amContainer = ta.appContext.getAllContainers().get(event.getContainerId());
  Container container = amContainer.getContainer();

  ta.allocationTime = amContainer.getCurrentTaskAttemptAllocationTime();
  ta.container = container;
  ta.containerId = event.getContainerId();
  ta.containerNodeId = container.getNodeId();
  ta.nodeHttpAddress = StringInterner.weakIntern(container.getNodeHttpAddress());
  ta.nodeRackName = StringInterner.weakIntern(RackResolver.resolve(ta.containerNodeId.getHost())
      .getNetworkLocation());
  ta.lastNotifyProgressTimestamp = ta.clock.getTime();

  ta.setLaunchTime();

  // TODO Resolve to host / IP in case of a local address.
  InetSocketAddress nodeHttpInetAddr = NetUtils
      .createSocketAddr(ta.nodeHttpAddress); // TODO: Costly?
  ta.trackerName = StringInterner.weakIntern(nodeHttpInetAddr.getHostName());
  ta.httpPort = nodeHttpInetAddr.getPort();
  ta.sendEvent(createDAGCounterUpdateEventTALaunched(ta));

  LOG.info("TaskAttempt: [" + ta.attemptId + "] submitted."
      + " Is using containerId: [" + ta.containerId + "]"
      + " on NM: [" + ta.containerNodeId + "]");

  // JobHistoryEvent.
  // The started event represents when the attempt was submitted to the executor.
  ta.logJobHistoryAttemptStarted();

  // TODO Remove after HDFS-5098
  // Compute LOCALITY counter for this task.
  if (ta.taskHosts.contains(ta.containerNodeId.getHost())) {
    ta.localityCounter = DAGCounter.DATA_LOCAL_TASKS;
  } else if (ta.taskRacks.contains(ta.nodeRackName)) {
    ta.localityCounter = DAGCounter.RACK_LOCAL_TASKS;
  } else {
    // Not computing this if the task does not have locality information.
    if (ta.getTaskLocationHint() != null) {
      ta.localityCounter = DAGCounter.OTHER_LOCAL_TASKS;
    }
  }

  // Inform the Task
  ta.sendEvent(new TaskEventTALaunched(ta.attemptId));

  if (ta.isSpeculationEnabled()) {
    ta.sendEvent(new SpeculatorEventTaskAttemptStatusUpdate(ta.attemptId,
        TaskAttemptState.RUNNING, ta.launchTime, true));
  }

  ta.sendEvent(
      new AMSchedulerEventTAStateUpdated(ta, TaskScheduler.SchedulerTaskState.SUBMITTED,
          ta.getVertex().getTaskSchedulerIdentifier()));
  ta.taskHeartbeatHandler.register(ta.attemptId);
}
Example #27
Source File: TaskAttemptImpl.java From tez with Apache License 2.0
@SuppressWarnings("rawtypes")
public TaskAttemptImpl(TezTaskAttemptID attemptId, EventHandler eventHandler,
    TaskCommunicatorManagerInterface taskCommunicatorManagerInterface, Configuration conf,
    Clock clock, TaskHeartbeatHandler taskHeartbeatHandler, AppContext appContext,
    boolean isRescheduled, Resource resource, ContainerContext containerContext,
    boolean leafVertex, Task task, TaskLocationHint locationHint, TaskSpec taskSpec,
    TezTaskAttemptID schedulingCausalTA) {
  ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  this.readLock = rwLock.readLock();
  this.writeLock = rwLock.writeLock();
  this.attemptId = attemptId;
  this.eventHandler = eventHandler;
  //Reported status
  this.conf = conf;
  this.clock = clock;
  this.taskHeartbeatHandler = taskHeartbeatHandler;
  this.appContext = appContext;
  this.vertex = task.getVertex();
  this.task = task;
  this.locationHint = locationHint;
  this.taskSpec = taskSpec;
  this.creationCausalTA = schedulingCausalTA;
  this.creationTime = clock.getTime();

  this.reportedStatus = new TaskAttemptStatus(this.attemptId);
  initTaskAttemptStatus(reportedStatus);
  RackResolver.init(conf);
  this.stateMachine = stateMachineFactory.make(this);
  this.isRescheduled = isRescheduled;
  this.taskResource = resource;
  this.containerContext = containerContext;
  this.leafVertex = leafVertex;
  this.hungIntervalMax = conf.getLong(
      TezConfiguration.TEZ_TASK_PROGRESS_STUCK_INTERVAL_MS,
      TezConfiguration.TEZ_TASK_PROGRESS_STUCK_INTERVAL_MS_DEFAULT);

  this.recoveryData = appContext.getDAGRecoveryData() == null ?
      null : appContext.getDAGRecoveryData().getTaskAttemptRecoveryData(attemptId);
}
Example #28
Source File: MockDNSToSwitchMapping.java From tez with Apache License 2.0
public static void initializeMockRackResolver() {
  Configuration rackResolverConf = new Configuration(false);
  rackResolverConf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      MockDNSToSwitchMapping.class.getName());
  RackResolver.init(rackResolverConf);
}
Example #29
Source File: TestTaskScheduler.java From incubator-tez with Apache License 2.0
@SuppressWarnings("unchecked")
@Test(timeout=5000)
public void testTaskSchedulerRandomReuseExpireTime() throws Exception {
  RackResolver.init(new YarnConfiguration());
  TaskSchedulerAppCallback mockApp = mock(TaskSchedulerAppCallback.class);
  AppContext mockAppContext = mock(AppContext.class);
  when(mockAppContext.getAMState()).thenReturn(DAGAppMasterState.RUNNING);

  TezAMRMClientAsync<CookieContainerRequest> mockRMClient =
      mock(TezAMRMClientAsync.class);

  String appHost = "host";
  int appPort = 0;
  String appUrl = "url";
  TaskSchedulerWithDrainableAppCallback scheduler1 =
      new TaskSchedulerWithDrainableAppCallback(
          mockApp, new AlwaysMatchesContainerMatcher(), appHost, appPort,
          appUrl, mockRMClient, mockAppContext);
  TaskSchedulerWithDrainableAppCallback scheduler2 =
      new TaskSchedulerWithDrainableAppCallback(
          mockApp, new AlwaysMatchesContainerMatcher(), appHost, appPort,
          appUrl, mockRMClient, mockAppContext);

  long minTime = 1000l;
  long maxTime = 100000l;
  Configuration conf1 = new Configuration();
  conf1.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, minTime);
  conf1.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MAX_MILLIS, minTime);
  scheduler1.init(conf1);
  Configuration conf2 = new Configuration();
  conf2.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MIN_MILLIS, minTime);
  conf2.setLong(TezConfiguration.TEZ_AM_CONTAINER_IDLE_RELEASE_TIMEOUT_MAX_MILLIS, maxTime);
  scheduler2.init(conf2);

  RegisterApplicationMasterResponse mockRegResponse =
      mock(RegisterApplicationMasterResponse.class);
  Resource mockMaxResource = mock(Resource.class);
  Map<ApplicationAccessType, String> mockAcls = mock(Map.class);
  when(mockRegResponse.getMaximumResourceCapability()).
      thenReturn(mockMaxResource);
  when(mockRegResponse.getApplicationACLs()).thenReturn(mockAcls);
  when(mockRMClient.
      registerApplicationMaster(anyString(), anyInt(), anyString())).
      thenReturn(mockRegResponse);
  Resource mockClusterResource = mock(Resource.class);
  when(mockRMClient.getAvailableResources()).
      thenReturn(mockClusterResource);

  scheduler1.start();
  scheduler2.start();

  // when min == max the expire time is always min
  for (int i=0; i<10; ++i) {
    Assert.assertEquals(minTime, scheduler1.getHeldContainerExpireTime(0));
  }

  long lastExpireTime = 0;
  // when min < max the expire time is random in between min and max
  for (int i=0; i<10; ++i) {
    long currExpireTime = scheduler2.getHeldContainerExpireTime(0);
    Assert.assertTrue(
        "min: " + minTime + " curr: " + currExpireTime + " max: " + maxTime,
        (minTime <= currExpireTime && currExpireTime <= maxTime));
    Assert.assertNotEquals(lastExpireTime, currExpireTime);
    lastExpireTime = currExpireTime;
  }

  String appMsg = "success";
  AppFinalStatus finalStatus =
      new AppFinalStatus(FinalApplicationStatus.SUCCEEDED, appMsg, appUrl);
  when(mockApp.getFinalAppStatus()).thenReturn(finalStatus);
  scheduler1.stop();
  scheduler1.close();
  scheduler2.stop();
  scheduler2.close();
}
Example #30
Source File: TestTezAMRMClient.java From incubator-tez with Apache License 2.0
@Before
public void setup() {
  amrmClient = new TezAMRMClientAsync(new AMRMClientImpl(), 1000,
      mock(AMRMClientAsync.CallbackHandler.class));
  RackResolver.init(new Configuration());
}