Java Code Examples for org.apache.hadoop.mapreduce.Job#setMapSpeculativeExecution()
The following examples show how to use
org.apache.hadoop.mapreduce.Job#setMapSpeculativeExecution(), which enables or disables speculative execution for a job's map tasks.
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
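For orientation, here is a minimal driver sketch showing where the call typically sits in job configuration. The class name, map-only pass-through setup, and input/output paths are hypothetical and not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class SpeculationExampleDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "speculation-example");
        job.setJarByClass(SpeculationExampleDriver.class);

        // Map-only pass-through job: no mapper class is set, so the default (identity) Mapper is used.
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(0);

        // Do not launch duplicate (speculative) attempts of map tasks;
        // this is commonly done when mappers have external side effects.
        job.setMapSpeculativeExecution(false);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

The real-world examples below follow the same pattern: the flag is set on the Job (or its Configuration) before submission, usually alongside setReduceSpeculativeExecution() or setSpeculativeExecution() when all speculation must be avoided.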
Example 1
Source File: ActiveUserRunner.java From BigDataArchitect with Apache License 2.0
@Override
protected void beforeRunJob(Job job) throws IOException {
    super.beforeRunJob(job);
    job.setNumReduceTasks(3); // one reducer per statistics dimension
    job.setPartitionerClass(ActiveUserPartitioner.class); // set the partitioner class
    // do not enable speculative execution
    job.setMapSpeculativeExecution(false);
    job.setReduceSpeculativeExecution(false);
}
Example 2
Source File: UpdateColumnJob.java From indexr with Apache License 2.0
public boolean doRun(Config upcolConfig) throws Exception {
    JobConf jobConf = new JobConf(getConf(), UpdateColumnJob.class);
    jobConf.setKeepFailedTaskFiles(false);
    jobConf.setNumReduceTasks(0);
    String jobName = String.format("indexr-upcol-%s-%s-%s",
            upcolConfig.table,
            LocalDateTime.now().format(timeFormatter),
            RandomStringUtils.randomAlphabetic(5));
    jobConf.setJobName(jobName);
    jobConf.set(CONFKEY, JsonUtil.toJson(upcolConfig));
    Path workDir = new Path(jobConf.getWorkingDirectory(), jobName);
    jobConf.setWorkingDirectory(workDir);

    Job job = Job.getInstance(jobConf);
    job.setInputFormatClass(SegmentInputFormat.class);
    job.setMapperClass(UpColSegmentMapper.class);
    job.setJarByClass(UpdateColumnJob.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    // do not run speculative map attempts
    job.setMapSpeculativeExecution(false);
    job.setOutputFormatClass(UpColSegmentOutputFormat.class);

    job.submit();
    boolean ok = job.waitForCompletion(true);
    if (!ok) {
        TaskReport[] reports = job.getTaskReports(TaskType.MAP);
        if (reports != null) {
            for (TaskReport report : reports) {
                log.error("Error in task [%s] : %s", report.getTaskId(), Arrays.toString(report.getDiagnostics()));
            }
        }
    }
    return ok;
}
Example 3
Source File: CompactionTool.java From hbase with Apache License 2.0
/**
 * Execute compaction, using a Map-Reduce job.
 */
private int doMapReduce(final FileSystem fs, final Set<Path> toCompactDirs,
        final boolean compactOnce, final boolean major) throws Exception {
    Configuration conf = getConf();
    conf.setBoolean(CONF_COMPACT_ONCE, compactOnce);
    conf.setBoolean(CONF_COMPACT_MAJOR, major);

    Job job = new Job(conf);
    job.setJobName("CompactionTool");
    job.setJarByClass(CompactionTool.class);
    job.setMapperClass(CompactionMapper.class);
    job.setInputFormatClass(CompactionInputFormat.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setMapSpeculativeExecution(false);
    job.setNumReduceTasks(0);

    // add dependencies (including HBase ones)
    TableMapReduceUtil.addDependencyJars(job);

    Path stagingDir = JobUtil.getQualifiedStagingDir(conf);
    FileSystem stagingFs = stagingDir.getFileSystem(conf);
    try {
        // Create input file with the store dirs
        Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime());
        List<Path> storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs);
        CompactionInputFormat.addInputPath(job, inputPath);

        // Initialize credential for secure cluster
        TableMapReduceUtil.initCredentials(job);
        // Despite the method name this will get delegation token for the filesystem
        TokenCache.obtainTokensForNamenodes(job.getCredentials(),
            storeDirs.toArray(new Path[0]), conf);

        // Start the MR Job and wait
        return job.waitForCompletion(true) ? 0 : 1;
    } finally {
        fs.delete(stagingDir, true);
    }
}
Example 4
Source File: HalyardBulkDelete.java From Halyard with Apache License 2.0
@Override
public int run(CommandLine cmd) throws Exception {
    String source = cmd.getOptionValue('t');
    TableMapReduceUtil.addDependencyJars(getConf(),
            HalyardExport.class,
            NTriplesUtil.class,
            Rio.class,
            AbstractRDFHandler.class,
            RDFFormat.class,
            RDFParser.class,
            HTable.class,
            HBaseConfiguration.class,
            AuthenticationProtos.class,
            Trace.class,
            Gauge.class);
    HBaseConfiguration.addHbaseResources(getConf());
    Job job = Job.getInstance(getConf(), "HalyardDelete " + source);
    if (cmd.hasOption('s')) {
        job.getConfiguration().set(SUBJECT, cmd.getOptionValue('s'));
    }
    if (cmd.hasOption('p')) {
        job.getConfiguration().set(PREDICATE, cmd.getOptionValue('p'));
    }
    if (cmd.hasOption('o')) {
        job.getConfiguration().set(OBJECT, cmd.getOptionValue('o'));
    }
    if (cmd.hasOption('g')) {
        job.getConfiguration().setStrings(CONTEXTS, cmd.getOptionValues('g'));
    }
    job.setJarByClass(HalyardBulkDelete.class);
    TableMapReduceUtil.initCredentials(job);

    Scan scan = HalyardTableUtils.scan(null, null);

    TableMapReduceUtil.initTableMapperJob(source, scan, DeleteMapper.class,
            ImmutableBytesWritable.class, LongWritable.class, job);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(KeyValue.class);
    // turn off speculative execution for the job as a whole and for map and reduce tasks
    job.setSpeculativeExecution(false);
    job.setMapSpeculativeExecution(false);
    job.setReduceSpeculativeExecution(false);
    try (HTable hTable = HalyardTableUtils.getTable(getConf(), source, false, 0)) {
        HFileOutputFormat2.configureIncrementalLoad(job, hTable.getTableDescriptor(), hTable.getRegionLocator());
        FileOutputFormat.setOutputPath(job, new Path(cmd.getOptionValue('f')));
        TableMapReduceUtil.addDependencyJars(job);
        if (job.waitForCompletion(true)) {
            new LoadIncrementalHFiles(getConf()).doBulkLoad(new Path(cmd.getOptionValue('f')), hTable);
            LOG.info("Bulk Delete Completed..");
            return 0;
        }
    }
    return -1;
}
Example 5
Source File: ExportSnapshot.java From hbase with Apache License 2.0
/**
 * Run Map-Reduce Job to perform the files copy.
 */
private void runCopyJob(final Path inputRoot, final Path outputRoot,
        final String snapshotName, final Path snapshotDir, final boolean verifyChecksum,
        final String filesUser, final String filesGroup, final int filesMode,
        final int mappers, final int bandwidthMB)
        throws IOException, InterruptedException, ClassNotFoundException {
    Configuration conf = getConf();
    if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
    if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
    if (mappers > 0) {
        conf.setInt(CONF_NUM_SPLITS, mappers);
        conf.setInt(MR_NUM_MAPS, mappers);
    }
    conf.setInt(CONF_FILES_MODE, filesMode);
    conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
    conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
    conf.set(CONF_INPUT_ROOT, inputRoot.toString());
    conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB);
    conf.set(CONF_SNAPSHOT_NAME, snapshotName);
    conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString());

    String jobname = conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName);
    Job job = new Job(conf);
    job.setJobName(jobname);
    job.setJarByClass(ExportSnapshot.class);
    TableMapReduceUtil.addDependencyJars(job);
    job.setMapperClass(ExportMapper.class);
    job.setInputFormatClass(ExportSnapshotInputFormat.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setMapSpeculativeExecution(false);
    job.setNumReduceTasks(0);

    // Acquire the delegation Tokens
    Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
        new Path[] { inputRoot }, srcConf);
    Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
        new Path[] { outputRoot }, destConf);

    // Run the MR Job
    if (!job.waitForCompletion(true)) {
        throw new ExportSnapshotException(job.getStatus().getFailureInfo());
    }
}