Java Code Examples for org.mortbay.log.Log#info()

The following examples show how to use org.mortbay.log.Log#info(). The source file, originating project, and license are noted above each snippet.
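
Before the project examples, here is a minimal, self-contained sketch of the call shapes that recur below: the single-String overload, the parameterized overload in which "{}" is substituted with the extra argument (the form used in Examples 16 and 17), and the companion Log.warn(String, Throwable) call (used in Examples 4 and 12). The class name LogInfoDemo is illustrative only and does not come from any of the quoted projects.

import org.mortbay.log.Log;

public class LogInfoDemo {
  public static void main(String[] args) {
    // Plain message: the most common form in the examples below.
    Log.info("Starting demo run");

    // Parameterized message: "{}" is replaced by the argument,
    // as in Log.info("curStatus = {}", curStatus) in Example 17.
    Log.info("Processed {} records", 42);

    // Companion warn overload that attaches a Throwable, as in Examples 4 and 12.
    Log.warn("Demo step failed", new java.io.IOException("example failure"));
  }
}

Which logger actually receives these calls depends on what Jetty's static Log facade finds on the classpath (commonly an slf4j binding, otherwise its stderr fallback), so the calls above behave the same way as they do in the project code that follows.
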
Example 1
Source File: MRCompactorJobRunner.java    From incubator-gobblin with Apache License 2.0
private void moveTmpPathToOutputPath() throws IOException {
  Retryer<Void> retryer = RetryerFactory.newInstance(this.retrierConfig);

  LOG.info(String.format("Moving %s to %s", this.dataset.outputTmpPath(), this.dataset.outputPath()));

  this.fs.delete(this.dataset.outputPath(), true);

  if (this.isRetryEnabled) {
    try {
      retryer.call(() -> {
        if (fs.exists(this.dataset.outputPath())) {
          throw new IOException("Path " + this.dataset.outputPath() + " exists however it should not. Will wait more.");
        }
        return null;
      });
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  WriterUtils.mkdirsWithRecursivePermissionWithRetry(MRCompactorJobRunner.this.fs, this.dataset.outputPath().getParent(), this.perm, this.retrierConfig);

  Log.info("Moving from fs: ("+MRCompactorJobRunner.this.tmpFs.getUri()+") path: "+ this.dataset.outputTmpPath() + " to "+ "fs: ("+ FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf()).getUri()+") output path: " + this.dataset.outputPath());
  HadoopUtils.movePath (MRCompactorJobRunner.this.tmpFs, this.dataset.outputTmpPath(), FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf()), this.dataset.outputPath(), false, this.fs.getConf()) ;
}
 
Example 2
Source File: TestAMRMClient.java    From hadoop with Apache License 2.0
private int getAllocatedContainersNumber(
    AMRMClientImpl<ContainerRequest> amClient, int iterationsLeft)
    throws YarnException, IOException {
  int allocatedContainerCount = 0;
  while (iterationsLeft-- > 0) {
    Log.info(" == alloc " + allocatedContainerCount + " it left " + iterationsLeft);
    AllocateResponse allocResponse = amClient.allocate(0.1f);
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
      
    assertEquals(nodeCount, amClient.getClusterNodeCount());
    allocatedContainerCount += allocResponse.getAllocatedContainers().size();
      
    if(allocatedContainerCount == 0) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(100);
    }
  }
  return allocatedContainerCount;
}
 
Example 3
Source File: EmbeddedGithubJsonToParquet.java    From incubator-gobblin with Apache License 2.0
private void downloadFile(String fileUrl, Path destination) {
  if (destination.toFile().exists()) {
    Log.info(String.format("Skipping download for %s at %s because destination already exists", fileUrl,
        destination.toString()));
    return;
  }

  try {
    URL archiveUrl = new URL(fileUrl);
    Log.info(String.format("Downloading %s at %s", fileUrl, destination.toString()));
    // Close the channel and stream even if the transfer fails.
    try (ReadableByteChannel rbc = Channels.newChannel(archiveUrl.openStream());
        FileOutputStream fos = new FileOutputStream(String.valueOf(destination))) {
      fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
    }
    Log.info(String.format("Download complete for %s at %s", fileUrl, destination.toString()));
  } catch (IOException e) {
    e.printStackTrace();
  }
}
 
Example 4
Source File: JobEndNotifier.java    From hadoop with Apache License 2.0
/**
 * Notify the URL just once. Use best effort.
 */
protected boolean notifyURLOnce() {
  boolean success = false;
  try {
    Log.info("Job end notification trying " + urlToNotify);
    HttpURLConnection conn =
      (HttpURLConnection) urlToNotify.openConnection(proxyToUse);
    conn.setConnectTimeout(timeout);
    conn.setReadTimeout(timeout);
    conn.setAllowUserInteraction(false);
    if(conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
      Log.warn("Job end notification to " + urlToNotify +" failed with code: "
      + conn.getResponseCode() + " and message \"" + conn.getResponseMessage()
      +"\"");
    }
    else {
      success = true;
      Log.info("Job end notification to " + urlToNotify + " succeeded");
    }
  } catch(IOException ioe) {
    Log.warn("Job end notification to " + urlToNotify + " failed", ioe);
  }
  return success;
}
 
Example 5
Source File: IcebergStorage.java    From iceberg with Apache License 2.0
private Table load(String location, Job job) throws IOException {
  if(iceberg == null) {
    Class<?> tablesImpl = job.getConfiguration().getClass(PIG_ICEBERG_TABLES_IMPL, HadoopTables.class);
    Log.info("Initializing iceberg tables implementation: " + tablesImpl);
    iceberg = (Tables) ReflectionUtils.newInstance(tablesImpl, job.getConfiguration());
  }

  Table result = tables.get(location);

  if (result == null) {
    try {
      LOG.info(format("[%s]: Loading table for location: %s", signature, location));
      result = iceberg.load(location);
      tables.put(location, result);
    } catch (Exception e) {
      throw new FrontendException("Failed to instantiate tables implementation", e);
    }
  }

  return result;
}
 
Example 6
Source File: ViewFileSystemTestSetup.java    From hadoop with Apache License 2.0
/**
 * Sets up a ViewFileSystem whose mount links point at the given target file system.
 *
 * @param conf configuration that receives the viewfs mount-table entries
 * @param fileSystemTestHelper helper used to locate the test root path
 * @param fsTarget the target fs of the view fs
 * @return the ViewFS FileSystem to be used for tests
 * @throws Exception if the view file system cannot be set up
 */
static public FileSystem setupForViewFileSystem(Configuration conf, FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  fsTarget.mkdirs(targetOfTests);


  // Set up viewfs link for test dir as described above
  String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
  
  
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");


  FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fsView.getWorkingDirectory());
  return fsView;
}
 
Example 7
Source File: TestGreedyReservationAgent.java    From big-c with Apache License 2.0
@Before
public void setup() throws Exception {

  long seed = rand.nextLong();
  rand.setSeed(seed);
  Log.info("Running with seed: " + seed);

  // setting completely loose quotas
  long timeWindow = 1000000L;
  Resource clusterCapacity = Resource.newInstance(100 * 1024, 100);
  step = 1000L;
  ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil();
  String reservationQ = testUtil.getFullReservationQueueName();

  float instConstraint = 100;
  float avgConstraint = 100;

  ReservationSchedulerConfiguration conf =
      ReservationSystemTestUtil.createConf(reservationQ, timeWindow,
          instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.init(reservationQ, conf);
  agent = new GreedyReservationAgent();

  QueueMetrics queueMetrics = mock(QueueMetrics.class);

  plan = new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity, step,
      res, minAlloc, maxAlloc, "dedicated", null, true);
}
 
Example 8
Source File: ViewFsTestSetup.java    From big-c with Apache License 2.0
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  FileContext fsTarget = FileContext.getLocalFSFileContext();
  Path targetOfTests = helper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  
  fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
  Configuration conf = new Configuration();
  
  // Set up viewfs link for test dir as described above
  String testDir = helper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
    
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
  
  FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fc.getWorkingDirectory());
  //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
  //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
  return fc;
}
 
Example 9
Source File: TestAllLoader.java    From spork with Apache License 2.0
/**
 * Validates that the loadAlias reads the expected number of records.
 *
 * @param server
 * @param loadAlias
 * @param totalRowCount the number of records the load is expected to produce
 * @throws IOException
 */
private void readRecordsFromLoader(PigServer server, String loadAlias,
        int totalRowCount) throws IOException {

    Iterator<Tuple> result = server.openIterator(loadAlias);
    int count = 0;

    while ((result.next()) != null) {
        count++;
    }

    Log.info("Validating expected: " + totalRowCount + " against " + count);
    assertEquals(totalRowCount, count);
}
 
Example 10
Source File: MockNM.java    From big-c with Apache License 2.0
public NodeHeartbeatResponse nodeHeartbeat(ApplicationAttemptId attemptId,
    long containerId, ContainerState containerState) throws Exception {
  HashMap<ApplicationId, List<ContainerStatus>> nodeUpdate =
      new HashMap<ApplicationId, List<ContainerStatus>>(1);
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      BuilderUtils.newContainerId(attemptId, containerId), containerState,
      "Success", 0);
  ArrayList<ContainerStatus> containerStatusList =
      new ArrayList<ContainerStatus>(1);
  containerStatusList.add(containerStatus);
  Log.info("ContainerStatus: " + containerStatus);
  nodeUpdate.put(attemptId.getApplicationId(), containerStatusList);
  return nodeHeartbeat(nodeUpdate, true);
}
 
Example 11
Source File: EmbeddedGithubJsonToParquet.java    From incubator-gobblin with Apache License 2.0
private Path createDownloadDir(String workDir, String fileUrl) {
  Path downloadDirPath = Paths.get(workDir, DOWNLOAD_DIR);
  File downloadDirFile = downloadDirPath.toFile();
  try {
    Log.info(String.format("Creating download dir %s", downloadDirFile.toPath().toString()));
    FileUtils.forceMkdir(downloadDirFile);
  } catch (IOException e) {
    throw new RuntimeException(String
        .format("Unable to create download location for archive: %s at %s", fileUrl, downloadDirPath.toString()));
  }
  Log.info(String.format("Created download dir %s", downloadDirFile.toPath().toString()));
  return downloadDirPath;
}
 
Example 12
Source File: JobEndNotifier.java    From big-c with Apache License 2.0
/**
 * Notify a server of the completion of a submitted job. The user must have
 * configured MRJobConfig.MR_JOB_END_NOTIFICATION_URL
 * @param jobReport JobReport used to read JobId and JobStatus
 * @throws InterruptedException
 */
public void notify(JobReport jobReport)
  throws InterruptedException {
  // Do we need job-end notification?
  if (userUrl == null) {
    Log.info("Job end notification URL not set, skipping.");
    return;
  }

  //Do string replacements for jobId and jobStatus
  if (userUrl.contains(JOB_ID)) {
    userUrl = userUrl.replace(JOB_ID, jobReport.getJobId().toString());
  }
  if (userUrl.contains(JOB_STATUS)) {
    userUrl = userUrl.replace(JOB_STATUS, jobReport.getJobState().toString());
  }

  // Create the URL, ensure sanity
  try {
    urlToNotify = new URL(userUrl);
  } catch (MalformedURLException mue) {
    Log.warn("Job end notification couldn't parse " + userUrl, mue);
    return;
  }

  // Send notification
  boolean success = false;
  while (numTries-- > 0 && !success) {
    Log.info("Job end notification attempts left " + numTries);
    success = notifyURLOnce();
    if (!success) {
      Thread.sleep(waitInterval);
    }
  }
  if (!success) {
    Log.warn("Job end notification failed to notify : " + urlToNotify);
  } else {
    Log.info("Job end notification succeeded for " + jobReport.getJobId());
  }
}
 
Example 13
Source File: MockNM.java    From hadoop with Apache License 2.0
public NodeHeartbeatResponse nodeHeartbeat(Map<ApplicationId,
    List<ContainerStatus>> conts, boolean isHealthy, int resId) throws Exception {
  NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
  NodeStatus status = Records.newRecord(NodeStatus.class);
  status.setResponseId(resId);
  status.setNodeId(nodeId);
  for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
    Log.info("entry.getValue() " + entry.getValue());
    status.setContainersStatuses(entry.getValue());
  }
  NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
  healthStatus.setHealthReport("");
  healthStatus.setIsNodeHealthy(isHealthy);
  healthStatus.setLastHealthReportTime(1);
  status.setNodeHealthStatus(healthStatus);
  req.setNodeStatus(status);
  req.setLastKnownContainerTokenMasterKey(this.currentContainerTokenMasterKey);
  req.setLastKnownNMTokenMasterKey(this.currentNMTokenMasterKey);
  NodeHeartbeatResponse heartbeatResponse =
      resourceTracker.nodeHeartbeat(req);
  
  MasterKey masterKeyFromRM = heartbeatResponse.getContainerTokenMasterKey();
  if (masterKeyFromRM != null
      && masterKeyFromRM.getKeyId() != this.currentContainerTokenMasterKey
          .getKeyId()) {
    this.currentContainerTokenMasterKey = masterKeyFromRM;
  }

  masterKeyFromRM = heartbeatResponse.getNMTokenMasterKey();
  if (masterKeyFromRM != null
      && masterKeyFromRM.getKeyId() != this.currentNMTokenMasterKey
          .getKeyId()) {
    this.currentNMTokenMasterKey = masterKeyFromRM;
  }
  
  return heartbeatResponse;
}
 
Example 14
Source File: MockNM.java    From hadoop with Apache License 2.0
public NodeHeartbeatResponse nodeHeartbeat(ApplicationAttemptId attemptId,
    long containerId, ContainerState containerState) throws Exception {
  HashMap<ApplicationId, List<ContainerStatus>> nodeUpdate =
      new HashMap<ApplicationId, List<ContainerStatus>>(1);
  ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
      BuilderUtils.newContainerId(attemptId, containerId), containerState,
      "Success", 0);
  ArrayList<ContainerStatus> containerStatusList =
      new ArrayList<ContainerStatus>(1);
  containerStatusList.add(containerStatus);
  Log.info("ContainerStatus: " + containerStatus);
  nodeUpdate.put(attemptId.getApplicationId(), containerStatusList);
  return nodeHeartbeat(nodeUpdate, true);
}
 
Example 15
Source File: TestGreedyReservationAgent.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {

  long seed = rand.nextLong();
  rand.setSeed(seed);
  Log.info("Running with seed: " + seed);

  // setting completely loose quotas
  long timeWindow = 1000000L;
  Resource clusterCapacity = Resource.newInstance(100 * 1024, 100, 100);
  step = 1000L;
  ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil();
  String reservationQ = testUtil.getFullReservationQueueName();

  float instConstraint = 100;
  float avgConstraint = 100;

  ReservationSchedulerConfiguration conf =
      ReservationSystemTestUtil.createConf(reservationQ, timeWindow,
          instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.init(reservationQ, conf);
  agent = new GreedyReservationAgent();

  QueueMetrics queueMetrics = mock(QueueMetrics.class);

  plan = new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity, step,
      res, minAlloc, maxAlloc, "dedicated", null, true);
}
 
Example 16
Source File: CouchbaseInputFormat.java    From laser with Apache License 2.0
@Override
public List<InputSplit> getSplits(JobContext context) throws IOException,
		InterruptedException {
	Configuration conf = context.getConfiguration();
	int numMapTasks = conf.getInt("com.b5m.couchbase.num.map.tasks", 120);
	final List<URI> ClientURIList = new ArrayList<URI>();

	try {
		List<String> uris = Arrays.asList(conf.get(
				CouchbaseConfig.CB_INPUT_CLUSTER).split(","));
		for (String uri : uris) {
			final URI ClusterURI = new URI(uri);
			ClientURIList.add(ClusterURI.resolve("/pools"));
		}
	} catch (URISyntaxException e) {
		throw new IOException(e);
	}
	final String bucket = conf.get(CouchbaseConfig.CB_INPUT_BUCKET, "");
	final String password = conf.get(CouchbaseConfig.CB_INPUT_PASSWORD, "");

	final CouchbaseConnectionFactory fact = new CouchbaseConnectionFactory(
			ClientURIList, bucket, password);

	final com.couchbase.client.vbucket.config.Config vbconfig = fact
			.getVBucketConfig();

	final List<VBucket> allVBuckets = vbconfig.getVbuckets();
	int numSplits = Math.min(numMapTasks, allVBuckets.size());
	int numVBucketsPerSplit = (int) Math.ceil(allVBuckets.size()
			/ (double) numSplits);
	Log.info("VBuckets size = {}", allVBuckets.size());
	@SuppressWarnings("unchecked")
	final ArrayList<Integer>[] vblists = new ArrayList[numSplits];
	int splitIndex = 0;
	int vbuckets = 0;
	for (int vbid = 0; vbid < allVBuckets.size(); vbid++) {
		if (vbuckets >= numVBucketsPerSplit) {
			vbuckets = 0;
			splitIndex++;
		}
		if (null == vblists[splitIndex]) {
			vblists[splitIndex] = new ArrayList<Integer>(
					numVBucketsPerSplit);
		}
		vblists[splitIndex].add(vbid);
		vbuckets++;
	}
	// int vbid = 0;
	// for(VBucket v : allVBuckets) {
	// if(vblists[v.getMaster()] == null) {
	// vblists[v.getMaster()] = new ArrayList<Integer>();
	// }
	// vblists[v.getMaster()].add(vbid);
	// vbid++;
	// }
	final ArrayList<InputSplit> splits = new ArrayList<InputSplit>();
	for (ArrayList<Integer> vblist : vblists) {
		if (null != vblist) {
			splits.add(new CouchbaseSplit(vblist));
		}
	}
	return splits;
}
 
Example 17
Source File: AdmmOptimizerDriver.java    From laser with Apache License 2.0
public static int run(Path signalData, Path output,
		Float regularizationFactor, Boolean addIntercept,
		Boolean regularizeIntercept, Integer iterationsMaximum,
		Configuration baseConf) throws IOException, ClassNotFoundException,
		InterruptedException {
	Configuration conf = new Configuration(baseConf);

	float thisRegularizationFactor = null == regularizationFactor ? DEFAULT_REGULARIZATION_FACTOR
			: regularizationFactor;
	boolean thisAddIntercept = null == addIntercept ? true : addIntercept;
	boolean thisRegularizeIntercept = null == regularizeIntercept ? false
			: regularizeIntercept;
	int thisIterationsMaximum = null == iterationsMaximum ? DEFAULT_ADMM_ITERATIONS_MAX
			: iterationsMaximum;

	int iterationNumber = 0;
	boolean isFinalIteration = false;
	conf.set("mapred.job.queue.name", "sf1");
	conf.setInt("mapred.task.timeout", 6000000);
	conf.setInt("mapred.job.map.memory.mb", 4096);
	conf.setInt("mapred.job.reduce.memory.mb", 4096);

	FileSystem fs = output.getFileSystem(conf);
	HadoopUtil.delete(conf, output);

	String intermediateHdfsBaseString = output.toString() + "/Iteration/";

	while (!isFinalIteration) {
		long preStatus = 0;
		Path previousHdfsResultsPath = new Path(intermediateHdfsBaseString
				+ ITERATION_FOLDER_NAME + (iterationNumber - 1));
		Path currentHdfsResultsPath = new Path(intermediateHdfsBaseString
				+ ITERATION_FOLDER_NAME + iterationNumber);

		long curStatus = doAdmmIteration(conf, previousHdfsResultsPath,
				currentHdfsResultsPath, signalData, iterationNumber,
				thisAddIntercept, thisRegularizeIntercept,
				thisRegularizationFactor);
		Log.info("curStatus = {}", curStatus);
		isFinalIteration = convergedOrMaxed(curStatus, preStatus,
				iterationNumber, thisIterationsMaximum);

		if (isFinalIteration) {
			Path finalOutput = new Path(output, FINAL_MODEL);
			fs.delete(finalOutput, true);
			fs.rename(new Path(currentHdfsResultsPath, "Z"), finalOutput);
		}
		iterationNumber++;
	}

	return 0;
}
 
Example 18
Source File: TestYarnCLI.java    From hadoop with Apache License 2.0
@Test
public void testGetContainers() throws Exception {
  ApplicationCLI cli = createAndGetAppCLI();
  ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
      applicationId, 1);
  ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
  ContainerId containerId1 = ContainerId.newContainerId(attemptId, 2);
  ContainerId containerId2 = ContainerId.newContainerId(attemptId, 3);
  long time1=1234,time2=5678;
  ContainerReport container = ContainerReport.newInstance(containerId, null,
      NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, time2,
      "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
      "http://" + NodeId.newInstance("host", 2345).toString());
  ContainerReport container1 = ContainerReport.newInstance(containerId1, null,
      NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, time2,
      "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
      "http://" + NodeId.newInstance("host", 2345).toString());
  ContainerReport container2 = ContainerReport.newInstance(containerId2, null,
      NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1,0,
      "diagnosticInfo", "", 0, ContainerState.RUNNING,
      "http://" + NodeId.newInstance("host", 2345).toString());
  List<ContainerReport> reports = new ArrayList<ContainerReport>();
  reports.add(container);
  reports.add(container1);
  reports.add(container2);
  DateFormat dateFormat=new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
  when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(
      reports);
  sysOutStream.reset();
  int result = cli.run(new String[] { "container", "-list",
      attemptId.toString() });
  assertEquals(0, result);
  verify(client).getContainers(attemptId);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintWriter pw = new PrintWriter(baos);
  pw.println("Total number of containers :3");
  pw.print("                  Container-Id");
  pw.print("\t          Start Time");
  pw.print("\t         Finish Time");
  pw.print("\t               State");
  pw.print("\t                Host");
  pw.print("\t   Node Http Address");
  pw.println("\t                            LOG-URL");
  pw.print(" container_1234_0005_01_000001");
  pw.print("\t"+dateFormat.format(new Date(time1)));
  pw.print("\t"+dateFormat.format(new Date(time2)));
  pw.print("\t            COMPLETE");
  pw.print("\t           host:1234");
  pw.print("\t    http://host:2345");
  pw.println("\t                             logURL");
  pw.print(" container_1234_0005_01_000002");
  pw.print("\t"+dateFormat.format(new Date(time1)));
  pw.print("\t"+dateFormat.format(new Date(time2)));
  pw.print("\t            COMPLETE");
  pw.print("\t           host:1234");
  pw.print("\t    http://host:2345");
  pw.println("\t                             logURL");
  pw.print(" container_1234_0005_01_000003");
  pw.print("\t"+dateFormat.format(new Date(time1)));
  pw.print("\t                 N/A");
  pw.print("\t             RUNNING");
  pw.print("\t           host:1234");
  pw.print("\t    http://host:2345");
  pw.println("\t                                   ");
  pw.close();
  String appReportStr = baos.toString("UTF-8");
  Log.info("ExpectedOutput");
  Log.info("["+appReportStr+"]");
  Log.info("OutputFrom command");
  String actualOutput = sysOutStream.toString();
  Log.info("["+actualOutput+"]");
  Assert.assertEquals(appReportStr, sysOutStream.toString());
}
 
Example 19
Source File: JobEndNotifier.java    From big-c with Apache License 2.0
/**
 * Parse the URL that needs to be notified of the end of the job, along
 * with the number of retries in case of failure, the amount of time to
 * wait between retries and proxy settings
 * @param conf the configuration 
 */
public void setConf(Configuration conf) {
  this.conf = conf;
  
  numTries = Math.min(
      conf.getInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 0) + 1,
      conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, 1));
  waitInterval = Math.min(
      conf.getInt(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, 5000),
      conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, 5000));
  waitInterval = (waitInterval < 0) ? 5000 : waitInterval;

  timeout = conf.getInt(JobContext.MR_JOB_END_NOTIFICATION_TIMEOUT,
      JobContext.DEFAULT_MR_JOB_END_NOTIFICATION_TIMEOUT);

  userUrl = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL);

  proxyConf = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY);

  //Configure the proxy to use if its set. It should be set like
  //proxyType@proxyHostname:port
  if(proxyConf != null && !proxyConf.equals("") &&
       proxyConf.lastIndexOf(":") != -1) {
    int typeIndex = proxyConf.indexOf("@");
    Proxy.Type proxyType = Proxy.Type.HTTP;
    if(typeIndex != -1 &&
      proxyConf.substring(0, typeIndex).compareToIgnoreCase("socks") == 0) {
      proxyType = Proxy.Type.SOCKS;
    }
    String hostname = proxyConf.substring(typeIndex + 1,
      proxyConf.lastIndexOf(":"));
    String portConf = proxyConf.substring(proxyConf.lastIndexOf(":") + 1);
    try {
      int port = Integer.parseInt(portConf);
      proxyToUse = new Proxy(proxyType,
        new InetSocketAddress(hostname, port));
      Log.info("Job end notification using proxy type \"" + proxyType + 
      "\" hostname \"" + hostname + "\" and port \"" + port + "\"");
    } catch(NumberFormatException nfe) {
      Log.warn("Job end notification couldn't parse configured proxy's port "
        + portConf + ". Not going to use a proxy");
    }
  }

}
 
Example 20
Source File: HdfsManager.java    From aliyun-maxcompute-data-collectors with Apache License 2.0
public HdfsManager(SqoopOptions options) {
  this.options = options;
  this.options.setTableName("sqoop_orm");

  isCreateTable = options.isOdpsCreateTable();

  String tableName = Preconditions.checkNotNull(options.getOdpsTable(),
      "Import to ODPS error: Table name not specified");
  String accessID = Preconditions.checkNotNull(options.getOdpsAccessID(),
      "Error: ODPS access ID not specified");
  String accessKey = Preconditions.checkNotNull(options.getOdpsAccessKey(),
      "Error: ODPS access key not specified");
  String project = Preconditions.checkNotNull(options.getOdpsProject(),
      "Error: ODPS project not specified");
  String endpoint = Preconditions.checkNotNull(options.getOdpsEndPoint(),
      "Error: ODPS endpoint not specified");

  odps = new Odps(new AliyunAccount(accessID, accessKey));
  odps.setUserAgent(OdpsUtil.getUserAgent());
  odps.setDefaultProject(project);
  odps.setEndpoint(endpoint);
  
  String[] colNames = options.getColumns();
  Tables tables = odps.tables();
  boolean existsTable = false;
  try {
    existsTable = tables.exists(options.getOdpsTable());
  } catch (OdpsException e) {
    throw new RuntimeException("ODPS exception", e);
  }
  if (colNames == null) {
    if (!existsTable) {
      throw new RuntimeException("missing --columns");
    } else {
      Table t = tables.get(options.getOdpsTable());
      colNames = new String[t.getSchema().getColumns().size()];
      Log.info("colNames size: " + colNames.length);
      for (int i = 0; i < colNames.length; ++i) {
        colNames[i] = new String(t.getSchema().getColumns().get(i).getName());
        Log.info("colNames colNames[i]: " + colNames[i]);
      }
    }

    options.setColumns(colNames);
  }

}