org.apache.hadoop.mapreduce.v2.api.records.JobId Java Examples
The following examples show how to use org.apache.hadoop.mapreduce.v2.api.records.JobId.
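JobId is the MapReduce v2 record type that identifies a job inside YARN. Before the examples, here is a minimal sketch of the two ways the snippets below typically obtain a JobId: building one with MRBuilderUtils, or converting a classic JobID with TypeConverter. The cluster timestamp and id values are made up for illustration.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

public class JobIdSketch {
  public static void main(String[] args) {
    // Build a JobId from a cluster timestamp, application id, and job id
    // (illustrative values, not from any real cluster).
    JobId built = MRBuilderUtils.newJobId(1234567890000L, 1, 1);

    // Or convert from the classic mapreduce JobID, as several examples
    // below do with JobID.forName(...).
    JobID classic = JobID.forName("job_1234567890000_0001");
    JobId converted = TypeConverter.toYarn(classic);

    System.out.println(built + " / " + converted);
  }
}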
Example #1
Source File: TestRMContainerAllocator.java From hadoop with Apache License 2.0
private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
    int memory, String[] hosts, boolean earlierFailedAttempt, boolean reduce) {
  TaskId taskId;
  if (reduce) {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
  } else {
    taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
  }
  TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, taskAttemptId);
  Resource containerNeed = Resource.newInstance(memory, 1);
  if (earlierFailedAttempt) {
    return ContainerRequestEvent
        .createContainerRequestEventForFailedContainer(attemptId, containerNeed);
  }
  return new ContainerRequestEvent(attemptId, containerNeed, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK });
}
Example #2
Source File: MRCommunicator.java From jumbune with GNU Lesser General Public License v3.0
/**
 * Tries to extract all MAP or REDUCE task reports for a given job id.
 * @param taskType the TaskType, either MAP or REDUCE
 * @param jobId the Job Id for which all task reports are to be extracted
 * @return a Map<TaskId, TaskReport>
 * @throws IOException
 */
public Map<TaskId, TaskReport> getTaskTypeWiseTaskReports(TaskType taskType,
    JobId jobId) throws IOException {
  Map<TaskId, TaskReport> reports = new HashMap<TaskId, TaskReport>();
  TaskReport report;
  // Attempt to extract task-type-wise attempt reports, incrementing the
  // task id until the server signals the end with a RemoteException.
  boolean rme = false;
  int id = 0;
  do {
    try {
      report = getTaskReport(jobId, id, taskType);
      TaskId taskId = MRBuilderUtils.newTaskId(jobId, id, taskType);
      reports.put(taskId, report);
      id++;
    } catch (RemoteException re) {
      rme = true;
    }
  } while (!rme);
  return reports;
}
Example #3
Source File: CachedHistoryStorage.java From hadoop with Apache License 2.0
@Override
public Job getFullJob(JobId jobId) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Looking for Job " + jobId);
  }
  try {
    HistoryFileInfo fileInfo = hsManager.getFileInfo(jobId);
    Job result = null;
    if (fileInfo != null) {
      result = loadedJobCache.get(jobId);
      if (result == null) {
        result = loadJob(fileInfo);
      } else if (fileInfo.isDeleted()) {
        loadedJobCache.remove(jobId);
        result = null;
      }
    } else {
      loadedJobCache.remove(jobId);
    }
    return result;
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
Example #4
Source File: MockHistoryJobs.java From big-c with Apache License 2.0
private static JobsPair split(Map<JobId, Job> mocked) throws IOException {
  JobsPair ret = new JobsPair();
  ret.full = Maps.newHashMap();
  ret.partial = Maps.newHashMap();
  for (Map.Entry<JobId, Job> entry : mocked.entrySet()) {
    JobId id = entry.getKey();
    Job j = entry.getValue();
    MockCompletedJob mockJob = new MockCompletedJob(j);
    // use MockCompletedJob to set everything below to make sure
    // consistent with what history server would do
    ret.full.put(id, mockJob);
    JobReport report = mockJob.getReport();
    JobIndexInfo info = new JobIndexInfo(report.getStartTime(),
        report.getFinishTime(), mockJob.getUserName(), mockJob.getName(), id,
        mockJob.getCompletedMaps(), mockJob.getCompletedReduces(),
        String.valueOf(mockJob.getState()));
    info.setJobStartTime(report.getStartTime());
    info.setQueueName(mockJob.getQueueName());
    ret.partial.put(id, new PartialJob(info, id));
  }
  return ret;
}
Example #5
Source File: TestAMWebServicesJobConf.java From big-c with Apache License 2.0
@Test
public void testJobConfSlash() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("conf/")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("conf");
    verifyAMJobConf(info, jobsMap.get(id));
  }
}
Example #6
Source File: TestJobHistoryParsing.java From hadoop with Apache License 2.0
/**
 * Simple test of PartialJob
 */
@Test(timeout = 3000)
public void testPartialJob() throws Exception {
  JobId jobId = new JobIdPBImpl();
  jobId.setId(0);
  JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
      "jobName", jobId, 3, 2, "JobStatus");
  PartialJob test = new PartialJob(jii, jobId);
  assertEquals(1.0f, test.getProgress(), 0.001);
  assertNull(test.getAllCounters());
  assertNull(test.getTasks());
  assertNull(test.getTasks(TaskType.MAP));
  assertNull(test.getTask(new TaskIdPBImpl()));
  assertNull(test.getTaskAttemptCompletionEvents(0, 100));
  assertNull(test.getMapAttemptCompletionEvents(0, 100));
  assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
  assertNull(test.getAMInfos());
}
Example #7
Source File: TestStagingCleanup.java From hadoop with Apache License 2.0
@Test(timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException {
  conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
  fs = mock(FileSystem.class);
  when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
  // Staging Dir exists
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  Path stagingDir = MRApps.getStagingAreaDir(conf, user);
  when(fs.exists(stagingDir)).thenReturn(true);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 0);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 0);
  JobId jobid = recordFactory.newRecordInstance(JobId.class);
  jobid.setAppId(appId);
  ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
  MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc);
  appMaster.init(conf);
  // simulate the process being killed
  MRAppMaster.MRAppMasterShutdownHook hook =
      new MRAppMaster.MRAppMasterShutdownHook(appMaster);
  hook.run();
  verify(fs, times(0)).delete(stagingJobPath, true);
}
Example #8
Source File: TestHsWebServicesJobs.java From hadoop with Apache License 2.0
@Test
public void testJobAttemptsXML() throws Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
    String xml = response.getEntity(String.class);
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    DocumentBuilder db = dbf.newDocumentBuilder();
    InputSource is = new InputSource();
    is.setCharacterStream(new StringReader(xml));
    Document dom = db.parse(is);
    NodeList attempts = dom.getElementsByTagName("jobAttempts");
    assertEquals("incorrect number of elements", 1, attempts.getLength());
    NodeList info = dom.getElementsByTagName("jobAttempt");
    verifyHsJobAttemptsXML(info, appContext.getJob(id));
  }
}
Example #9
Source File: TestHsWebServicesJobs.java From hadoop with Apache License 2.0
@Test
public void testJobAttempts() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
Example #10
Source File: TestHsWebServicesJobs.java From hadoop with Apache License 2.0
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
Example #11
Source File: TestFileNameIndexUtils.java From hadoop with Apache License 2.0
@Test
public void testQueueNamePercentEncoding() throws IOException {
  JobIndexInfo info = new JobIndexInfo();
  JobID oldJobId = JobID.forName(JOB_ID);
  JobId jobId = TypeConverter.toYarn(oldJobId);
  info.setJobId(jobId);
  info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
  info.setUser(USER_NAME);
  info.setJobName(JOB_NAME);
  info.setFinishTime(Long.parseLong(FINISH_TIME));
  info.setNumMaps(Integer.parseInt(NUM_MAPS));
  info.setNumReduces(Integer.parseInt(NUM_REDUCES));
  info.setJobStatus(JOB_STATUS);
  info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
  info.setJobStartTime(Long.parseLong(JOB_START_TIME));

  String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
  Assert.assertTrue("Queue name not encoded correctly into job history file",
      jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
Example #12
Source File: StartEndTimesBase.java From big-c with Apache License 2.0
protected DataStatistics dataStatisticsForTask(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return null;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return null;
  }

  return task.getType() == TaskType.MAP
      ? mapperStatistics.get(job)
      : task.getType() == TaskType.REDUCE
          ? reducerStatistics.get(job)
          : null;
}
Example #13
Source File: StartEndTimesBase.java From hadoop with Apache License 2.0
protected DataStatistics dataStatisticsForTask(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  Job job = context.getJob(jobID);

  if (job == null) {
    return null;
  }

  Task task = job.getTask(taskID);

  if (task == null) {
    return null;
  }

  return task.getType() == TaskType.MAP
      ? mapperStatistics.get(job)
      : task.getType() == TaskType.REDUCE
          ? reducerStatistics.get(job)
          : null;
}
Example #14
Source File: TestAMWebServicesJobs.java From hadoop with Apache License 2.0
@Test
public void testJobCountersDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
Example #15
Source File: TestHsWebServicesJobs.java From big-c with Apache License 2.0
@Test
public void testJobAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
        .get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobAttempts");
    verifyHsJobAttempts(info, appContext.getJob(id));
  }
}
Example #16
Source File: TestAMWebServicesTasks.java From big-c with Apache License 2.0
@Test
public void testTaskIdDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid)
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      assertEquals("incorrect number of elements", 1, json.length());
      JSONObject info = json.getJSONObject("task");
      verifyAMSingleTask(info, task);
    }
  }
}
Example #17
Source File: DefaultSpeculator.java From hadoop with Apache License 2.0
private AtomicInteger containerNeed(TaskId taskID) {
  JobId jobID = taskID.getJobId();
  TaskType taskType = taskID.getTaskType();

  ConcurrentMap<JobId, AtomicInteger> relevantMap =
      taskType == TaskType.MAP ? mapContainerNeeds : reduceContainerNeeds;

  AtomicInteger result = relevantMap.get(jobID);

  if (result == null) {
    relevantMap.putIfAbsent(jobID, new AtomicInteger(0));
    result = relevantMap.get(jobID);
  }

  return result;
}
Example #18
Source File: TestAMWebServicesAttempts.java From big-c with Apache License 2.0
@Test
public void testTaskAttempts() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
Example #19
Source File: TestAMWebServicesAttempts.java From hadoop with Apache License 2.0
@Test
public void testTaskAttemptsDefault() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
          .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
          .get(ClientResponse.class);
      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
      JSONObject json = response.getEntity(JSONObject.class);
      verifyAMTaskAttempts(json, task);
    }
  }
}
Example #20
Source File: TestAMWebServicesJobs.java From big-c with Apache License 2.0
@Test
public void testJobCounters() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
        .path("jobs").path(jobId).path("counters")
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    JSONObject info = json.getJSONObject("jobCounters");
    verifyAMJobCounters(info, jobsMap.get(id));
  }
}
Example #21
Source File: TestJobImpl.java From hadoop with Apache License 2.0
private static StubbedJob createStubbedJob(Configuration conf,
    Dispatcher dispatcher, int numSplits, AppContext appContext) {
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  if (appContext == null) {
    appContext = mock(AppContext.class);
    when(appContext.hasSuccessfullyUnregistered()).thenReturn(true);
  }
  StubbedJob job = new StubbedJob(jobId,
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0),
      conf, dispatcher.getEventHandler(), true, "somebody", numSplits,
      appContext);
  dispatcher.register(JobEventType.class, job);
  EventHandler mockHandler = mock(EventHandler.class);
  dispatcher.register(TaskEventType.class, mockHandler);
  dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class,
      mockHandler);
  dispatcher.register(JobFinishEvent.Type.class, mockHandler);
  return job;
}
Example #22
Source File: CommitterJobAbortEvent.java From big-c with Apache License 2.0
public CommitterJobAbortEvent(JobId jobID, JobContext jobContext,
    JobStatus.State finalState) {
  super(CommitterEventType.JOB_ABORT);
  this.jobID = jobID;
  this.jobContext = jobContext;
  this.finalState = finalState;
}
Example #23
Source File: TestAMWebServicesAttempts.java From hadoop with Apache License 2.0
@Test
public void testTaskAttemptIdCounters() throws JSONException, Exception {
  WebResource r = resource();
  Map<JobId, Job> jobsMap = appContext.getAllJobs();
  for (JobId id : jobsMap.keySet()) {
    String jobId = MRApps.toString(id);
    for (Task task : jobsMap.get(id).getTasks().values()) {
      String tid = MRApps.toString(task.getID());
      for (TaskAttempt att : task.getAttempts().values()) {
        TaskAttemptId attemptid = att.getID();
        String attid = MRApps.toString(attemptid);
        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
            .path("jobs").path(jobId).path("tasks").path(tid)
            .path("attempts").path(attid).path("counters")
            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
        JSONObject json = response.getEntity(JSONObject.class);
        assertEquals("incorrect number of elements", 1, json.length());
        JSONObject info = json.getJSONObject("jobTaskAttemptCounters");
        verifyAMJobTaskAttemptCounters(info, att);
      }
    }
  }
}
Example #24
Source File: CachedHistoryStorage.java From hadoop with Apache License 2.0
@SuppressWarnings("serial") private void createLoadedJobCache(Configuration conf) { loadedJobCacheSize = conf.getInt( JHAdminConfig.MR_HISTORY_LOADED_JOB_CACHE_SIZE, JHAdminConfig.DEFAULT_MR_HISTORY_LOADED_JOB_CACHE_SIZE); loadedJobCache = Collections.synchronizedMap(new LinkedHashMap<JobId, Job>( loadedJobCacheSize + 1, 0.75f, true) { @Override public boolean removeEldestEntry(final Map.Entry<JobId, Job> eldest) { return super.size() > loadedJobCacheSize; } }); }
Example #25
Source File: GetTaskAttemptCompletionEventsRequestPBImpl.java From big-c with Apache License 2.0
@Override
public void setJobId(JobId jobId) {
  maybeInitBuilder();
  if (jobId == null)
    builder.clearJobId();
  this.jobId = jobId;
}
Example #26
Source File: TestStagingCleanup.java From big-c with Apache License 2.0
@Override
protected Job createJob(Configuration conf, JobStateInternal forcedState,
    String diagnostic) {
  JobImpl jobImpl = mock(JobImpl.class);
  when(jobImpl.getInternalState()).thenReturn(this.jobStateInternal);
  when(jobImpl.getAllCounters()).thenReturn(new Counters());
  JobID jobID = JobID.forName("job_1234567890000_0001");
  JobId jobId = TypeConverter.toYarn(jobID);
  when(jobImpl.getID()).thenReturn(jobId);
  ((AppContext) getContext()).getAllJobs().put(jobImpl.getID(), jobImpl);
  return jobImpl;
}
Example #27
Source File: TestJobHistoryEventHandler.java From hadoop with Apache License 2.0
@Override
protected void processDoneFiles(JobId jobId) throws IOException {
  if (!mockHistoryProcessing) {
    super.processDoneFiles(jobId);
  } else {
    // do nothing
  }
}
Example #28
Source File: GetTaskReportsRequestPBImpl.java From hadoop with Apache License 2.0
@Override
public void setJobId(JobId jobId) {
  maybeInitBuilder();
  if (jobId == null)
    builder.clearJobId();
  this.jobId = jobId;
}
Example #29
Source File: TestJobInfo.java From big-c with Apache License 2.0
@Test(timeout = 10000)
public void testAverageMergeTime() throws IOException {
  String historyFileName =
      "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
  String confFileName = "job_1329348432655_0001_conf.xml";
  Configuration conf = new Configuration();
  JobACLsManager jobAclsMgr = new JobACLsManager(conf);
  Path fullHistoryPath = new Path(TestJobHistoryEntities.class.getClassLoader()
      .getResource(historyFileName).getFile());
  Path fullConfPath = new Path(TestJobHistoryEntities.class.getClassLoader()
      .getResource(confFileName).getFile());

  HistoryFileInfo info = mock(HistoryFileInfo.class);
  when(info.getConfFile()).thenReturn(fullConfPath);

  JobId jobId = MRBuilderUtils.newJobId(1329348432655L, 1, 1);
  CompletedJob completedJob = new CompletedJob(conf, jobId, fullHistoryPath,
      true, "user", info, jobAclsMgr);
  JobInfo jobInfo = new JobInfo(completedJob);
  // There are 2 tasks with merge times of 45 and 55 respectively, so the
  // average merge time should be 50.
  Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
}
Example #30
Source File: JobHistoryEventHandler.java From big-c with Apache License 2.0
/**
 * Close the event writer for this id.
 * @throws IOException
 */
public void closeWriter(JobId id) throws IOException {
  try {
    final MetaInfo mi = fileMap.get(id);
    if (mi != null) {
      mi.closeWriter();
    }
  } catch (IOException e) {
    LOG.error("Error closing writer for JobID: " + id);
    throw e;
  }
}
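Many of the web-service tests above format a JobId into its canonical string with MRApps.toString(id). For completeness, here is a minimal sketch of going the other way as well, converting a JobId back to the classic JobID; the id values are illustrative only, and the snippet assumes the same hadoop-mapreduce client libraries used by the examples above.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

public class JobIdRoundTrip {
  public static void main(String[] args) {
    // Illustrative cluster timestamp / app id / job id values.
    JobId jobId = MRBuilderUtils.newJobId(1234567890000L, 1, 1);

    // String form used in the REST paths of the examples above.
    String asString = MRApps.toString(jobId);

    // Back to the classic mapreduce JobID record.
    JobID classic = TypeConverter.fromYarn(jobId);

    System.out.println(asString + " == " + classic);
  }
}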