org.apache.hadoop.mapreduce.Counters Java Examples
The following examples show how to use org.apache.hadoop.mapreduce.Counters. Each example names the project and source file it comes from.
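Before the project examples, here is a minimal end-to-end sketch of how Counters are typically used: a task increments a counter through its context, and the driver reads the aggregated values back from the finished job. All names here (MyCounters, TokenMapper, the job setup) are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

public class CounterSketch {

  // A user-defined counter enum; Hadoop derives the group name from the enum class.
  public enum MyCounters { GOOD_RECORDS }

  public static class TokenMapper
      extends Mapper<LongWritable, Text, Text, LongWritable> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      // Increment a custom counter from inside a task.
      context.getCounter(MyCounters.GOOD_RECORDS).increment(1);
      context.write(value, new LongWritable(1));
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "counter-sketch");
    // Input/output paths, jar, mapper and reducer setup elided for brevity.
    job.waitForCompletion(true);

    // Read the aggregated counters back in the driver once the job finishes.
    Counters counters = job.getCounters();
    long good = counters.findCounter(MyCounters.GOOD_RECORDS).getValue();
    System.out.println("good records = " + good);
  }
}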
Example #1
Source File: TestEvents.java From big-c with Apache License 2.0
/**
 * Test the getters of TaskAttemptFinishedEvent and TaskAttemptFinished.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testTaskAttemptFinishedEvent() throws Exception {
  JobID jid = new JobID("001", 1);
  TaskID tid = new TaskID(jid, TaskType.REDUCE, 2);
  TaskAttemptID taskAttemptId = new TaskAttemptID(tid, 3);
  Counters counters = new Counters();
  TaskAttemptFinishedEvent test = new TaskAttemptFinishedEvent(taskAttemptId,
      TaskType.REDUCE, "TEST", 123L, "RAKNAME", "HOSTNAME", "STATUS",
      counters);
  assertEquals(test.getAttemptId().toString(), taskAttemptId.toString());
  assertEquals(test.getCounters(), counters);
  assertEquals(test.getFinishTime(), 123L);
  assertEquals(test.getHostname(), "HOSTNAME");
  assertEquals(test.getRackName(), "RAKNAME");
  assertEquals(test.getState(), "STATUS");
  assertEquals(test.getTaskId(), tid);
  assertEquals(test.getTaskStatus(), "TEST");
  assertEquals(test.getTaskType(), TaskType.REDUCE);
}
Example #2
Source File: IntegrationTestBigLinkedList.java From hbase with Apache License 2.0
/**
 * Verify the values in the Counters against the expected number of entries written.
 *
 * @param expectedReferenced Expected number of referenced entries
 * @param counters The Job's Counters object
 * @return True if the values match what's expected, false otherwise
 */
protected boolean verifyExpectedValues(long expectedReferenced, Counters counters) {
  final Counter referenced = counters.findCounter(Counts.REFERENCED);
  final Counter unreferenced = counters.findCounter(Counts.UNREFERENCED);
  boolean success = true;

  if (expectedReferenced != referenced.getValue()) {
    LOG.error("Expected referenced count does not match with actual referenced count. "
        + "expected referenced=" + expectedReferenced + " ,actual=" + referenced.getValue());
    success = false;
  }

  if (unreferenced.getValue() > 0) {
    final Counter multiref = counters.findCounter(Counts.EXTRAREFERENCES);
    boolean couldBeMultiRef = (multiref.getValue() == unreferenced.getValue());
    LOG.error("Unreferenced nodes were not expected. Unreferenced count="
        + unreferenced.getValue()
        + (couldBeMultiRef ? "; could be due to duplicate random numbers" : ""));
    success = false;
  }

  return success;
}
Example #3
Source File: TestFetchFailure.java From big-c with Apache License 2.0
private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
  TaskAttemptStatusUpdateEvent.TaskAttemptStatus status =
      new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
  status.counters = new Counters();
  status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
  status.id = attempt.getID();
  status.mapFinishTime = 0;
  status.phase = phase;
  status.progress = 0.5f;
  status.shuffleFinishTime = 0;
  status.sortFinishTime = 0;
  status.stateString = "OK";
  status.taskState = attempt.getState();
  TaskAttemptStatusUpdateEvent event =
      new TaskAttemptStatusUpdateEvent(attempt.getID(), status);
  app.getContext().getEventHandler().handle(event);
}
Example #4
Source File: JobImpl.java From big-c with Apache License 2.0
@Override
public Counters getAllCounters() {
  readLock.lock();
  try {
    JobStateInternal state = getInternalState();
    if (state == JobStateInternal.ERROR || state == JobStateInternal.FAILED
        || state == JobStateInternal.KILLED
        || state == JobStateInternal.SUCCEEDED) {
      this.mayBeConstructFinalFullCounters();
      return fullCounters;
    }

    Counters counters = new Counters();
    counters.incrAllCounters(jobCounters);
    return incrTaskCounters(counters, tasks.values());
  } finally {
    readLock.unlock();
  }
}
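The incrAllCounters call above is the merge primitive this pattern relies on: every counter in the argument is added into the receiver, creating missing groups and counters on the fly. A standalone sketch of that behavior follows; the group and counter names are made up for illustration.

import org.apache.hadoop.mapreduce.Counters;

public class MergeSketch {
  public static void main(String[] args) {
    Counters base = new Counters();
    base.findCounter("MyGroup", "RECORDS").increment(5);

    Counters delta = new Counters();
    delta.findCounter("MyGroup", "RECORDS").increment(7);

    // Fold every counter in 'delta' into 'base', as getAllCounters() does
    // with the job-level counters before adding the task counters.
    base.incrAllCounters(delta);
    System.out.println(base.findCounter("MyGroup", "RECORDS").getValue()); // 12
  }
}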
Example #5
Source File: BlurInputFormatTest.java From incubator-retired-blur with Apache License 2.0
private void assertMapTask(int i, Counters counters) {
  for (CounterGroup counterGroup : counters) {
    String name = counterGroup.getName();
    boolean jobCounterGroup = false;
    if (name.equals("org.apache.hadoop.mapreduce.JobCounter")) {
      jobCounterGroup = true;
    } else if (name.equals("org.apache.hadoop.mapred.JobInProgress$Counter")) {
      jobCounterGroup = true;
    }
    if (jobCounterGroup) {
      for (Counter counter : counterGroup) {
        if (counter.getName().equals("TOTAL_LAUNCHED_MAPS")) {
          assertEquals(1, counter.getValue());
          return;
        }
      }
    }
  }
  fail();
}
Example #6
Source File: IndexScrutinyToolIT.java From phoenix with Apache License 2.0
/**
 * Tests an index with the same # of rows as the data table, but one of the
 * index rows is incorrect. Scrutiny should report the invalid rows.
 */
@Test
public void testEqualRowCountIndexIncorrect() throws Exception {
  // insert one valid row
  upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
  conn.commit();

  // disable the index and insert another row which is not indexed
  disableIndex();
  upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
  conn.commit();

  // insert a bad row into the index
  upsertIndexRow("badName", 2, 9999);
  conn.commit();

  // scrutiny should report the bad row
  List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
  Job job = completedJobs.get(0);
  assertTrue(job.isSuccessful());
  Counters counters = job.getCounters();
  assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
  assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
}
Example #7
Source File: TaskImpl.java From big-c with Apache License 2.0
@Override
public Counters getCounters() {
  Counters counters = null;
  readLock.lock();
  try {
    TaskAttempt bestAttempt = selectBestAttempt();
    if (bestAttempt != null) {
      counters = bestAttempt.getCounters();
    } else {
      counters = TaskAttemptImpl.EMPTY_COUNTERS;
      // counters.groups = new HashMap<CharSequence, CounterGroup>();
    }
    return counters;
  } finally {
    readLock.unlock();
  }
}
Example #8
Source File: CompletedTask.java From big-c with Apache License 2.0
private void constructTaskReport() {
  loadAllTaskAttempts();
  this.report = Records.newRecord(TaskReport.class);
  report.setTaskId(taskId);
  long minLaunchTime = Long.MAX_VALUE;
  for (TaskAttempt attempt : attempts.values()) {
    minLaunchTime = Math.min(minLaunchTime, attempt.getLaunchTime());
  }
  minLaunchTime = minLaunchTime == Long.MAX_VALUE ? -1 : minLaunchTime;
  report.setStartTime(minLaunchTime);
  report.setFinishTime(taskInfo.getFinishTime());
  report.setTaskState(getState());
  report.setProgress(getProgress());
  Counters counters = getCounters();
  if (counters == null) {
    counters = EMPTY_COUNTERS;
  }
  report.setCounters(TypeConverter.toYarn(counters));
  if (successfulAttempt != null) {
    report.setSuccessfulAttempt(successfulAttempt);
  }
  report.addAllDiagnostics(reportDiagnostics);
  report.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
}
Example #9
Source File: CounterDump.java From datawave with Apache License 2.0
public String toString() {
  StringBuilder builder = new StringBuilder();
  while (source.hasNext()) {
    Entry<String,Counters> nextCntr = source.next();
    builder.append("\n").append(nextCntr.getKey()).append("\n----------------------\n");
    Counters counters = nextCntr.getValue();
    for (String groupName : counters.getGroupNames()) {
      CounterGroup group = counters.getGroup(groupName);
      Iterator<Counter> cntrItr = group.iterator();
      while (cntrItr.hasNext()) {
        Counter counter = cntrItr.next();
        builder.append(groupName).append("\t").append(counter.getDisplayName())
            .append("=").append(counter.getValue()).append("\n");
      }
    }
  }
  return builder.toString();
}
Example #10
Source File: IndexScrutinyToolIT.java From phoenix with Apache License 2.0
/**
 * Tests when there are more data table rows than index table rows. Scrutiny
 * should report the number of incorrect rows.
 */
@Test
public void testMoreDataRows() throws Exception {
  upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
  conn.commit();
  disableIndex();
  // these rows won't have a corresponding index row
  upsertRow(dataTableUpsertStmt, 2, "name-2", 95124);
  upsertRow(dataTableUpsertStmt, 3, "name-3", 95125);
  conn.commit();

  List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
  Job job = completedJobs.get(0);
  assertTrue(job.isSuccessful());
  Counters counters = job.getCounters();
  assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
  assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}
Example #11
Source File: ExportJobBase.java From aliyun-maxcompute-data-collectors with Apache License 2.0
@Override
protected boolean runJob(Job job) throws ClassNotFoundException, IOException,
    InterruptedException {

  PerfCounters perfCounters = new PerfCounters();
  perfCounters.startClock();

  boolean success = doSubmitJob(job);
  perfCounters.stopClock();

  Counters jobCounters = job.getCounters();
  // If the job has been retired, these may be unavailable.
  if (null == jobCounters) {
    displayRetiredJobNotice(LOG);
  } else {
    perfCounters.addBytes(jobCounters.getGroup("FileSystemCounters")
        .findCounter("HDFS_BYTES_READ").getValue());
    LOG.info("Transferred " + perfCounters.toString());
    long numRecords = ConfigurationHelper.getNumMapInputRecords(job);
    LOG.info("Exported " + numRecords + " records.");
  }

  return success;
}
Example #12
Source File: MapAttemptFinishedEvent.java From hadoop with Apache License 2.0
/**
 * Create an event for successful completion of map attempts
 * @param id Task Attempt ID
 * @param taskType Type of the task
 * @param taskStatus Status of the task
 * @param mapFinishTime Finish time of the map phase
 * @param finishTime Finish time of the attempt
 * @param hostname Name of the host where the map executed
 * @param port RPC port for the tracker host.
 * @param rackName Name of the rack where the map executed
 * @param state State string for the attempt
 * @param counters Counters for the attempt
 * @param allSplits the "splits", or a pixelated graph of various
 *        measurable worker node state variables against progress.
 *        Currently there are four; wallclock time, CPU time,
 *        virtual memory and physical memory.
 *
 *        If you have no splits data, code {@code null} for this
 *        parameter.
 */
public MapAttemptFinishedEvent(TaskAttemptID id, TaskType taskType,
    String taskStatus, long mapFinishTime, long finishTime, String hostname,
    int port, String rackName, String state, Counters counters,
    int[][] allSplits) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.mapFinishTime = mapFinishTime;
  this.finishTime = finishTime;
  this.hostname = hostname;
  this.rackName = rackName;
  this.port = port;
  this.state = state;
  this.counters = counters;
  this.allSplits = allSplits;
  this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
  this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
  this.gpuUsages = ProgressSplitsBlock.arrayGetGPUTime(allSplits);
  this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
  this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
}
Example #13
Source File: TaskAttemptFinishedEvent.java From big-c with Apache License 2.0
/**
 * Create an event to record successful finishes for setup and cleanup
 * attempts
 * @param id Attempt ID
 * @param taskType Type of task
 * @param taskStatus Status of task
 * @param finishTime Finish time of attempt
 * @param rackName Name of the rack where the attempt executed
 * @param hostname Host where the attempt executed
 * @param state State string
 * @param counters Counters for the attempt
 */
public TaskAttemptFinishedEvent(TaskAttemptID id,
    TaskType taskType, String taskStatus,
    long finishTime, String rackName,
    String hostname, String state, Counters counters) {
  this.attemptId = id;
  this.taskType = taskType;
  this.taskStatus = taskStatus;
  this.finishTime = finishTime;
  this.rackName = rackName;
  this.hostname = hostname;
  this.state = state;
  this.counters = counters;
}
Example #14
Source File: TestJobHistoryEventHandler.java From big-c with Apache License 2.0
@Test(timeout = 50000)
public void testCountersToJSON() throws Exception {
  JobHistoryEventHandler jheh = new JobHistoryEventHandler(null, 0);
  Counters counters = new Counters();
  CounterGroup group1 = counters.addGroup("DOCTORS",
      "Incarnations of the Doctor");
  group1.addCounter("PETER_CAPALDI", "Peter Capaldi", 12);
  group1.addCounter("MATT_SMITH", "Matt Smith", 11);
  group1.addCounter("DAVID_TENNANT", "David Tennant", 10);
  CounterGroup group2 = counters.addGroup("COMPANIONS",
      "Companions of the Doctor");
  group2.addCounter("CLARA_OSWALD", "Clara Oswald", 6);
  group2.addCounter("RORY_WILLIAMS", "Rory Williams", 5);
  group2.addCounter("AMY_POND", "Amy Pond", 4);
  group2.addCounter("MARTHA_JONES", "Martha Jones", 3);
  group2.addCounter("DONNA_NOBLE", "Donna Noble", 2);
  group2.addCounter("ROSE_TYLER", "Rose Tyler", 1);
  JsonNode jsonNode = jheh.countersToJSON(counters);
  String jsonStr = new ObjectMapper().writeValueAsString(jsonNode);
  String expected = "[{\"NAME\":\"COMPANIONS\",\"DISPLAY_NAME\":\"Companions "
      + "of the Doctor\",\"COUNTERS\":[{\"NAME\":\"AMY_POND\",\"DISPLAY_NAME\""
      + ":\"Amy Pond\",\"VALUE\":4},{\"NAME\":\"CLARA_OSWALD\","
      + "\"DISPLAY_NAME\":\"Clara Oswald\",\"VALUE\":6},{\"NAME\":"
      + "\"DONNA_NOBLE\",\"DISPLAY_NAME\":\"Donna Noble\",\"VALUE\":2},"
      + "{\"NAME\":\"MARTHA_JONES\",\"DISPLAY_NAME\":\"Martha Jones\","
      + "\"VALUE\":3},{\"NAME\":\"RORY_WILLIAMS\",\"DISPLAY_NAME\":\"Rory "
      + "Williams\",\"VALUE\":5},{\"NAME\":\"ROSE_TYLER\",\"DISPLAY_NAME\":"
      + "\"Rose Tyler\",\"VALUE\":1}]},{\"NAME\":\"DOCTORS\",\"DISPLAY_NAME\""
      + ":\"Incarnations of the Doctor\",\"COUNTERS\":[{\"NAME\":"
      + "\"DAVID_TENNANT\",\"DISPLAY_NAME\":\"David Tennant\",\"VALUE\":10},"
      + "{\"NAME\":\"MATT_SMITH\",\"DISPLAY_NAME\":\"Matt Smith\",\"VALUE\":"
      + "11},{\"NAME\":\"PETER_CAPALDI\",\"DISPLAY_NAME\":\"Peter Capaldi\","
      + "\"VALUE\":12}]}]";
  Assert.assertEquals(expected, jsonStr);
}
Example #15
Source File: TestMRJobs.java From hadoop with Apache License 2.0
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  Assert.assertEquals(2, counters.findCounter(JobCounter.OTHER_LOCAL_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
      .getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_FAILED_MAPS)
      .getValue());
  Assert.assertTrue(counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS) != null
      && counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue() != 0);
}
Example #16
Source File: TestUberAM.java From hadoop with Apache License 2.0
@Override
protected void verifyFailingMapperCounters(Job job)
    throws InterruptedException, IOException {
  Counters counters = job.getCounters();
  super.verifyFailingMapperCounters(job);
  Assert.assertEquals(2,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
  Assert.assertEquals(2, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(2,
      counters.findCounter(JobCounter.NUM_FAILED_UBERTASKS).getValue());
}
Example #17
Source File: TestUberAM.java From hadoop with Apache License 2.0
@Override
protected void verifySleepJobCounters(Job job) throws InterruptedException,
    IOException {
  Counters counters = job.getCounters();
  super.verifySleepJobCounters(job);
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.NUM_UBER_SUBMAPS).getValue());
  Assert.assertEquals(numSleepReducers,
      counters.findCounter(JobCounter.NUM_UBER_SUBREDUCES).getValue());
  Assert.assertEquals(3 + numSleepReducers,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
Example #18
Source File: TestMiniMRClientCluster.java From hadoop with Apache License 2.0
private void validateCounters(Counters counters, long mapInputRecords,
    long mapOutputRecords, long reduceInputGroups, long reduceOutputRecords) {
  assertEquals("MapInputRecords", mapInputRecords, counters.findCounter(
      "MyCounterGroup", "MAP_INPUT_RECORDS").getValue());
  assertEquals("MapOutputRecords", mapOutputRecords, counters.findCounter(
      "MyCounterGroup", "MAP_OUTPUT_RECORDS").getValue());
  assertEquals("ReduceInputGroups", reduceInputGroups, counters.findCounter(
      "MyCounterGroup", "REDUCE_INPUT_GROUPS").getValue());
  assertEquals("ReduceOutputRecords", reduceOutputRecords, counters
      .findCounter("MyCounterGroup", "REDUCE_OUTPUT_RECORDS").getValue());
}
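The test above resolves counters by free-form group and counter name strings. For comparison, here is a small sketch of the two lookup styles Counters supports; the Stats enum is a hypothetical name, and note that an enum-based counter lives in a group named after the enum class, so it addresses a different counter than the string lookup.

import org.apache.hadoop.mapreduce.Counters;

public class LookupSketch {

  enum Stats { MAP_INPUT_RECORDS }

  public static void main(String[] args) {
    Counters counters = new Counters();

    // String-based lookup, as in the test above; group and name are free-form.
    counters.findCounter("MyCounterGroup", "MAP_INPUT_RECORDS").increment(1);

    // Enum-based lookup; the group is the enum's fully-qualified class name,
    // so this is a separate counter from the string-based one above.
    counters.findCounter(Stats.MAP_INPUT_RECORDS).increment(1);

    System.out.println(
        counters.findCounter("MyCounterGroup", "MAP_INPUT_RECORDS").getValue()); // 1
    System.out.println(
        counters.findCounter(Stats.MAP_INPUT_RECORDS).getValue()); // 1
  }
}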
Example #19
Source File: TaskFailedEvent.java From big-c with Apache License 2.0
/**
 * Create an event to record task failure
 * @param id Task ID
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param error Error String
 * @param status Status
 * @param failedDueToAttempt The attempt id due to which the task failed
 * @param counters Counters for the task
 */
public TaskFailedEvent(TaskID id, long finishTime,
    TaskType taskType, String error, String status,
    TaskAttemptID failedDueToAttempt, Counters counters) {
  this.id = id;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.error = error;
  this.status = status;
  this.failedDueToAttempt = failedDueToAttempt;
  this.counters = counters;
}
Example #20
Source File: TestUberAM.java From hadoop with Apache License 2.0
@Override
protected void verifyRandomWriterCounters(Job job)
    throws InterruptedException, IOException {
  super.verifyRandomWriterCounters(job);
  Counters counters = job.getCounters();
  Assert.assertEquals(3, counters.findCounter(JobCounter.NUM_UBER_SUBMAPS)
      .getValue());
  Assert.assertEquals(3,
      counters.findCounter(JobCounter.TOTAL_LAUNCHED_UBERTASKS).getValue());
}
Example #21
Source File: TaskAttemptImpl.java From big-c with Apache License 2.0
private void updateProgressSplits() {
  double newProgress = reportedStatus.progress;
  newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
  Counters counters = reportedStatus.counters;
  if (counters == null)
    return;

  WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
  if (splitsBlock != null) {
    long now = clock.getTime();
    long start = getLaunchTime(); // TODO Ensure not 0

    if (start != 0 && now - start <= Integer.MAX_VALUE) {
      splitsBlock.getProgressWallclockTime().extend(newProgress,
          (int) (now - start));
    }

    Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
      splitsBlock.getProgressCPUTime().extend(newProgress,
          (int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
    }

    Counter virtualBytes = counters
        .findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
    if (virtualBytes != null) {
      splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
          (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }

    Counter physicalBytes = counters
        .findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
    if (physicalBytes != null) {
      splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
          (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }
  }
}
Example #22
Source File: ScriptStats.java From Cubert with Apache License 2.0
public ScriptStats() {
  conf = new Configuration();
  try {
    jobClient = new JobClient(new JobConf(conf));
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  aggregate.startTime = System.currentTimeMillis();
  aggregate.counters = new Counters();
}
Example #23
Source File: IndexerJobDriver.java From incubator-retired-blur with Apache License 2.0
private boolean runMrOnly(TableDescriptor descriptor, List<Path> inprogressPathList,
    String table, Path fileCache, Path outputPath, int reducerMultipler)
    throws IOException, ClassNotFoundException, InterruptedException {
  Job job = Job.getInstance(getConf(), "Blur Row Updater for table [" + table + "]");

  Path tablePath = new Path(descriptor.getTableUri());
  BlurInputFormat.setLocalCachePath(job, fileCache);
  BlurInputFormat.addTable(job, descriptor, MRUPDATE_SNAPSHOT);
  MultipleInputs.addInputPath(job, tablePath, BlurInputFormat.class,
      ExistingDataMapper.class);

  for (Path p : inprogressPathList) {
    FileInputFormat.addInputPath(job, p);
    MultipleInputs.addInputPath(job, p, SequenceFileInputFormat.class,
        NewDataMapper.class);
  }

  BlurOutputFormat.setOutputPath(job, outputPath);
  BlurOutputFormat.setupJob(job, descriptor);

  job.setReducerClass(UpdateReducer.class);
  job.setMapOutputKeyClass(IndexKey.class);
  job.setMapOutputValueClass(IndexValue.class);
  job.setPartitionerClass(IndexKeyPartitioner.class);
  job.setGroupingComparatorClass(IndexKeyWritableComparator.class);

  BlurOutputFormat.setReducerMultiplier(job, reducerMultipler);

  boolean success = job.waitForCompletion(true);
  Counters counters = job.getCounters();
  LOG.info("Counters [" + counters + "]");
  return success;
}
Example #24
Source File: TestJobHistoryEventHandler.java From big-c with Apache License 2.0
private AppContext mockAppContext(ApplicationId appId, boolean isLastAMRetry) {
  JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId));
  AppContext mockContext = mock(AppContext.class);
  Job mockJob = mock(Job.class);
  when(mockJob.getAllCounters()).thenReturn(new Counters());
  when(mockJob.getTotalMaps()).thenReturn(10);
  when(mockJob.getTotalReduces()).thenReturn(10);
  when(mockJob.getName()).thenReturn("mockjob");
  when(mockContext.getJob(jobId)).thenReturn(mockJob);
  when(mockContext.getApplicationID()).thenReturn(appId);
  when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry);
  return mockContext;
}
Example #25
Source File: TaskFinishedEvent.java From hadoop with Apache License 2.0
/**
 * Create an event to record the successful completion of a task
 * @param id Task ID
 * @param attemptId Task Attempt ID of the successful attempt for this task
 * @param finishTime Finish time of the task
 * @param taskType Type of the task
 * @param status Status string
 * @param counters Counters for the task
 */
public TaskFinishedEvent(TaskID id, TaskAttemptID attemptId, long finishTime,
    TaskType taskType, String status, Counters counters) {
  this.taskid = id;
  this.successfulAttemptId = attemptId;
  this.finishTime = finishTime;
  this.taskType = taskType;
  this.status = status;
  this.counters = counters;
}
Example #26
Source File: TestRecovery.java From big-c with Apache License 2.0
private TaskAttemptInfo getMockTaskAttemptInfo(TaskAttemptID tai,
    TaskAttemptState tas) {

  ContainerId ci = mock(ContainerId.class);
  Counters counters = mock(Counters.class);
  TaskType tt = TaskType.MAP;

  long finishTime = System.currentTimeMillis();

  TaskAttemptInfo mockTAinfo = mock(TaskAttemptInfo.class);

  when(mockTAinfo.getAttemptId()).thenReturn(tai);
  when(mockTAinfo.getContainerId()).thenReturn(ci);
  when(mockTAinfo.getCounters()).thenReturn(counters);
  when(mockTAinfo.getError()).thenReturn("");
  when(mockTAinfo.getFinishTime()).thenReturn(finishTime);
  when(mockTAinfo.getHostname()).thenReturn("localhost");
  when(mockTAinfo.getHttpPort()).thenReturn(23);
  when(mockTAinfo.getMapFinishTime()).thenReturn(finishTime - 1000L);
  when(mockTAinfo.getPort()).thenReturn(24);
  when(mockTAinfo.getRackname()).thenReturn("defaultRack");
  when(mockTAinfo.getShuffleFinishTime()).thenReturn(finishTime - 2000L);
  when(mockTAinfo.getShufflePort()).thenReturn(25);
  when(mockTAinfo.getSortFinishTime()).thenReturn(finishTime - 3000L);
  when(mockTAinfo.getStartTime()).thenReturn(finishTime - 10000);
  when(mockTAinfo.getState()).thenReturn("task in progress");
  when(mockTAinfo.getTaskStatus()).thenReturn(tas.toString());
  when(mockTAinfo.getTaskType()).thenReturn(tt);
  when(mockTAinfo.getTrackerName()).thenReturn("TrackerName");
  return mockTAinfo;
}
Example #27
Source File: IndexerJobDriver.java From incubator-retired-blur with Apache License 2.0
private boolean runMrWithLookup(String uuid, TableDescriptor descriptor,
    List<Path> inprogressPathList, String table, Path fileCache, Path outputPath,
    int reducerMultipler, Path tmpPath, TableStats tableStats, String snapshot)
    throws ClassNotFoundException, IOException, InterruptedException {
  PartitionedInputResult result = buildPartitionedInputData(uuid, tmpPath,
      descriptor, inprogressPathList, snapshot, fileCache);

  Job job = Job.getInstance(getConf(), "Blur Row Updater for table [" + table + "]");

  ExistingDataIndexLookupMapper.setSnapshot(job, MRUPDATE_SNAPSHOT);
  FileInputFormat.addInputPath(job, result._partitionedInputData);
  MultipleInputs.addInputPath(job, result._partitionedInputData,
      SequenceFileInputFormat.class, ExistingDataIndexLookupMapper.class);

  for (Path p : inprogressPathList) {
    FileInputFormat.addInputPath(job, p);
    MultipleInputs.addInputPath(job, p, SequenceFileInputFormat.class,
        NewDataMapper.class);
  }

  BlurOutputFormat.setOutputPath(job, outputPath);
  BlurOutputFormat.setupJob(job, descriptor);

  job.setReducerClass(UpdateReducer.class);
  job.setMapOutputKeyClass(IndexKey.class);
  job.setMapOutputValueClass(IndexValue.class);
  job.setPartitionerClass(IndexKeyPartitioner.class);
  job.setGroupingComparatorClass(IndexKeyWritableComparator.class);

  BlurOutputFormat.setReducerMultiplier(job, reducerMultipler);

  boolean success = job.waitForCompletion(true);
  Counters counters = job.getCounters();
  LOG.info("Counters [" + counters + "]");
  return success;
}
Example #28
Source File: TestSyncTable.java From hbase with Apache License 2.0
private Counters syncTables(TableName sourceTableName, TableName targetTableName,
    Path testDir, String... options) throws Exception {
  SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration());
  String[] args = Arrays.copyOf(options, options.length + 3);
  args[options.length] = testDir.toString();
  args[options.length + 1] = sourceTableName.getNameAsString();
  args[options.length + 2] = targetTableName.getNameAsString();
  int code = syncTable.run(args);
  assertEquals("sync table job failed", 0, code);

  LOG.info("Sync tables completed");
  return syncTable.counters;
}
Example #29
Source File: HistoryEventEmitter.java From big-c with Apache License 2.0
protected static Counters maybeParseCounters(String counters) {
  try {
    return parseCounters(counters);
  } catch (ParseException e) {
    LOG.warn("The counter string, \"" + counters + "\" is badly formatted.");
    return null;
  }
}