Java Code Examples for org.mortbay.log.Log

The following examples show how to use org.mortbay.log.Log. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
/**
 * Set up the classloader for the webapp, using the various parts of the Maven project
 *
 * @see org.mortbay.jetty.webapp.Configuration#configureClassLoader()
 */
public void configureClassLoader() throws Exception {
    // No explicit classpath configured: defer to the default behaviour.
    if (classPathFiles == null) {
        super.configureClassLoader();
        return;
    }

    Log.debug("Setting up classpath ...");

    // Add the classes dir and every dependency to the webapp classloader.
    WebAppClassLoader webAppLoader =
            (WebAppClassLoader) getWebAppContext().getClassLoader();
    for (File entry : classPathFiles) {
        webAppLoader.addClassPath(entry.getCanonicalPath());
    }

    if (Log.isDebugEnabled()) {
        Log.debug("Classpath = " + LazyList.array2List(
                ((URLClassLoader) getWebAppContext().getClassLoader()).getURLs()));
    }
}
 
Example 2
/**
 * Set up the classloader for the webapp, using the various parts of the Maven project
 *
 * @see org.mortbay.jetty.webapp.Configuration#configureClassLoader()
 */
public void configureClassLoader() throws Exception {
    if (classPathFiles != null) {
        Log.debug("Setting up classpath ...");

        //put the classes dir and all dependencies into the classpath
        for (File classPathFile : classPathFiles) {
            ((WebAppClassLoader) getWebAppContext().getClassLoader()).addClassPath(
                    classPathFile.getCanonicalPath());
        }

        if (Log.isDebugEnabled()) {
            Log.debug("Classpath = " + LazyList.array2List(
                    ((URLClassLoader) getWebAppContext().getClassLoader()).getURLs()));
        }
    } else {
        super.configureClassLoader();
    }
}
 
Example 3
Source Project: iceberg   Source File: IcebergStorage.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Lazily instantiates the configured {@code Tables} implementation, then loads
 * (and caches in {@code tables}) the table for the given location.
 *
 * @param location table location to load
 * @param job job whose configuration supplies the tables implementation class
 * @return the loaded (possibly cached) table
 * @throws IOException wrapped as FrontendException if the table cannot be loaded
 */
private Table load(String location, Job job) throws IOException {
  if (iceberg == null) {
    Class<?> tablesImpl = job.getConfiguration().getClass(PIG_ICEBERG_TABLES_IMPL, HadoopTables.class);
    // Use the class-level LOG consistently; the original mixed in Jetty's
    // org.mortbay.log.Log here, routing this line to an unrelated logger.
    LOG.info("Initializing iceberg tables implementation: " + tablesImpl);
    iceberg = (Tables) ReflectionUtils.newInstance(tablesImpl, job.getConfiguration());
  }

  Table result = tables.get(location);

  if (result == null) {
    try {
      LOG.info(format("[%s]: Loading table for location: %s", signature, location));
      result = iceberg.load(location);
      tables.put(location, result);
    } catch (Exception e) {
      // Preserve the cause so callers can see why loading failed.
      throw new FrontendException("Failed to instantiate tables implementation", e);
    }
  }

  return result;
}
 
Example 4
Source Project: hadoop   Source File: TestAMRMClient.java    License: Apache License 2.0 6 votes vote down vote up
private int getAllocatedContainersNumber(
    AMRMClientImpl<ContainerRequest> amClient, int iterationsLeft)
    throws YarnException, IOException {
  // Poll the AM-RM protocol for the remaining iterations, summing the
  // containers granted on each allocate round.
  int totalAllocated = 0;
  while (iterationsLeft > 0) {
    iterationsLeft--;
    Log.info(" == alloc " + totalAllocated + " it left " + iterationsLeft);
    AllocateResponse response = amClient.allocate(0.1f);

    // No asks or releases should be pending after an allocate call.
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    assertEquals(nodeCount, amClient.getClusterNodeCount());

    totalAllocated += response.getAllocatedContainers().size();
    if (totalAllocated == 0) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(100);
    }
  }
  return totalAllocated;
}
 
Example 5
Source Project: hadoop   Source File: JobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Notify the URL just once. Use best effort.
 *
 * @return true if the endpoint answered HTTP 200, false on any failure
 */
protected boolean notifyURLOnce() {
  boolean success = false;
  HttpURLConnection conn = null;
  try {
    Log.info("Job end notification trying " + urlToNotify);
    conn = (HttpURLConnection) urlToNotify.openConnection(proxyToUse);
    conn.setConnectTimeout(timeout);
    conn.setReadTimeout(timeout);
    conn.setAllowUserInteraction(false);
    // Read the response code once; the original issued the request check twice.
    int responseCode = conn.getResponseCode();
    if (responseCode != HttpURLConnection.HTTP_OK) {
      Log.warn("Job end notification to " + urlToNotify + " failed with code: "
          + responseCode + " and message \"" + conn.getResponseMessage() + "\"");
    } else {
      success = true;
      Log.info("Job end notification to " + urlToNotify + " succeeded");
    }
  } catch (IOException ioe) {
    Log.warn("Job end notification to " + urlToNotify + " failed", ioe);
  } finally {
    // Release the connection; the original leaked it on every call.
    if (conn != null) {
      conn.disconnect();
    }
  }
  return success;
}
 
Example 6
Source Project: big-c   Source File: TestAMRMClient.java    License: Apache License 2.0 6 votes vote down vote up
private int getAllocatedContainersNumber(
    AMRMClientImpl<ContainerRequest> amClient, int iterationsLeft)
    throws YarnException, IOException {
  // Poll the AM-RM protocol for the remaining iterations, summing the
  // containers granted on each allocate round.
  int totalAllocated = 0;
  while (iterationsLeft > 0) {
    iterationsLeft--;
    Log.info(" == alloc " + totalAllocated + " it left " + iterationsLeft);
    AllocateResponse response = amClient.allocate(0.1f);

    // No asks or releases should be pending after an allocate call.
    assertEquals(0, amClient.ask.size());
    assertEquals(0, amClient.release.size());
    assertEquals(nodeCount, amClient.getClusterNodeCount());

    totalAllocated += response.getAllocatedContainers().size();
    if (totalAllocated == 0) {
      // sleep to let NM's heartbeat to RM and trigger allocations
      sleep(100);
    }
  }
  return totalAllocated;
}
 
Example 7
Source Project: big-c   Source File: JobEndNotifier.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Notify the URL just once. Use best effort.
 *
 * @return true if the endpoint answered HTTP 200, false on any failure
 */
protected boolean notifyURLOnce() {
  boolean success = false;
  HttpURLConnection conn = null;
  try {
    Log.info("Job end notification trying " + urlToNotify);
    conn = (HttpURLConnection) urlToNotify.openConnection(proxyToUse);
    conn.setConnectTimeout(timeout);
    conn.setReadTimeout(timeout);
    conn.setAllowUserInteraction(false);
    // Read the response code once; the original issued the request check twice.
    int responseCode = conn.getResponseCode();
    if (responseCode != HttpURLConnection.HTTP_OK) {
      Log.warn("Job end notification to " + urlToNotify + " failed with code: "
          + responseCode + " and message \"" + conn.getResponseMessage() + "\"");
    } else {
      success = true;
      Log.info("Job end notification to " + urlToNotify + " succeeded");
    }
  } catch (IOException ioe) {
    Log.warn("Job end notification to " + urlToNotify + " failed", ioe);
  } finally {
    // Release the connection; the original leaked it on every call.
    if (conn != null) {
      conn.disconnect();
    }
  }
  return success;
}
 
Example 8
/**
 * Set up the classloader for the webapp, using the various parts of the Maven project
 *
 * @see org.mortbay.jetty.webapp.Configuration#configureClassLoader()
 */
public void configureClassLoader() throws Exception {
    // No explicit classpath configured: defer to the default behaviour.
    if (classPathFiles == null) {
        super.configureClassLoader();
        return;
    }

    Log.debug("Setting up classpath ...");

    // Add the classes dir and every dependency to the webapp classloader.
    WebAppClassLoader webAppLoader =
            (WebAppClassLoader) getWebAppContext().getClassLoader();
    for (File entry : classPathFiles) {
        webAppLoader.addClassPath(entry.getCanonicalPath());
    }

    if (Log.isDebugEnabled()) {
        Log.debug("Classpath = " + LazyList.array2List(
                ((URLClassLoader) getWebAppContext().getClassLoader()).getURLs()));
    }
}
 
Example 9
/**
 * Set up the classloader for the webapp, using the various parts of the Maven project
 *
 * @see org.mortbay.jetty.webapp.Configuration#configureClassLoader()
 */
public void configureClassLoader() throws Exception {
    // No explicit classpath configured: defer to the default behaviour.
    if (classPathFiles == null) {
        super.configureClassLoader();
        return;
    }

    Log.debug("Setting up classpath ...");

    // Add the classes dir and every dependency to the webapp classloader.
    WebAppClassLoader webAppLoader =
            (WebAppClassLoader) getWebAppContext().getClassLoader();
    for (File entry : classPathFiles) {
        webAppLoader.addClassPath(entry.getCanonicalPath());
    }

    if (Log.isDebugEnabled()) {
        Log.debug("Classpath = " + LazyList.array2List(
                ((URLClassLoader) getWebAppContext().getClassLoader()).getURLs()));
    }
}
 
Example 10
Source Project: incubator-gobblin   Source File: MRCompactorJobRunner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Replaces the dataset output path with the contents of the tmp output path,
 * optionally waiting (with retries) for the deleted output path to vanish
 * before recreating its parent and moving the data in.
 *
 * @throws IOException if the delete, wait, mkdirs or move fails
 */
private void moveTmpPathToOutputPath() throws IOException {
  Retryer<Void> retryer = RetryerFactory.newInstance(this.retrierConfig);

  LOG.info(String.format("Moving %s to %s", this.dataset.outputTmpPath(), this.dataset.outputPath()));

  this.fs.delete(this.dataset.outputPath(), true);

  if (this.isRetryEnabled) {
    try {
      // Some file systems delete asynchronously; wait until the path is gone.
      retryer.call(() -> {
        if (fs.exists(this.dataset.outputPath())) {
          throw new IOException("Path " + this.dataset.outputPath() + " exists however it should not. Will wait more.");
        }
        return null;
      });
    } catch (Exception e) {
      throw new IOException(e);
    }
  }

  WriterUtils.mkdirsWithRecursivePermissionWithRetry(MRCompactorJobRunner.this.fs, this.dataset.outputPath().getParent(), this.perm, this.retrierConfig);

  // Resolve the destination file system once instead of twice.
  FileSystem dstFs = FileSystem.get(this.dataset.outputPath().getParent().toUri(), this.fs.getConf());
  // Use the class-level LOG consistently; the original mixed in Jetty's Log here.
  LOG.info("Moving from fs: (" + MRCompactorJobRunner.this.tmpFs.getUri() + ") path: " + this.dataset.outputTmpPath()
      + " to " + "fs: (" + dstFs.getUri() + ") output path: " + this.dataset.outputPath());
  HadoopUtils.movePath(MRCompactorJobRunner.this.tmpFs, this.dataset.outputTmpPath(), dstFs, this.dataset.outputPath(), false, this.fs.getConf());
}
 
Example 11
/**
 * Downloads {@code fileUrl} to {@code destination}, skipping the transfer when
 * the destination file already exists. Failures are logged and swallowed
 * (best effort).
 *
 * @param fileUrl URL of the file to fetch
 * @param destination local path to write the file to
 */
private void downloadFile(String fileUrl, Path destination) {
  if (destination.toFile().exists()) {
    Log.info(String.format("Skipping download for %s at %s because destination already exists", fileUrl,
        destination.toString()));
    return;
  }

  try {
    URL archiveUrl = new URL(fileUrl);
    // try-with-resources: the original leaked both the channel and the stream.
    try (ReadableByteChannel rbc = Channels.newChannel(archiveUrl.openStream());
        FileOutputStream fos = new FileOutputStream(String.valueOf(destination))) {
      Log.info(String.format("Downloading %s at %s", fileUrl, destination.toString()));
      fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
      Log.info(String.format("Download complete for %s at %s", fileUrl, destination.toString()));
    }
  } catch (IOException e) {
    // Log with the cause instead of dumping a raw stack trace to stderr.
    Log.warn("Failed to download " + fileUrl, e);
  }
}
 
Example 12
Source Project: opensoc-streaming   Source File: PcapHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the reverseKey to fetch the pcaps in the reverse traffic
 * (destination to source).
 * 
 * @param key
 *          indicates hbase rowKey (partial or full) in the format
 *          "srcAddr-dstAddr-protocol-srcPort-dstPort-fragment"
 * @return String indicates the key in the format
 *         "dstAddr-srcAddr-protocol-dstPort-srcPort", or an empty string if
 *         the key could not be parsed
 */
public static String reverseKey(String key) {
  Assert.hasText(key, "key must not be null or empty");
  String delimeter = HBaseConfigConstants.PCAP_KEY_DELIMETER;
  String regex = "\\" + delimeter;
  // Method-local buffer: StringBuilder avoids StringBuffer's needless locking.
  StringBuilder sb = new StringBuilder();
  try {
    String[] tokens = key.split(regex);
    Assert
        .isTrue(
            (tokens.length == 5 || tokens.length == 6 || tokens.length == 7),
            "key is not in the format : 'srcAddr-dstAddr-protocol-srcPort-dstPort-{ipId-fragment identifier}'");
    // Swap src/dst address and src/dst port; protocol stays in the middle.
    sb.append(tokens[1]).append(delimeter).append(tokens[0])
        .append(delimeter).append(tokens[2]).append(delimeter)
        .append(tokens[4]).append(delimeter).append(tokens[3]);
  } catch (Exception e) {
    Log.warn("Failed to reverse the key. Reverse scan won't be performed.", e);
  }
  return sb.toString();
}
 
Example 13
Source Project: opensoc-streaming   Source File: PcapHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Builds the reverseKey to fetch the pcaps in the reverse traffic
 * (destination to source).
 * 
 * @param key
 *          indicates hbase rowKey (partial or full) in the format
 *          "srcAddr-dstAddr-protocol-srcPort-dstPort-fragment"
 * @return String indicates the key in the format
 *         "dstAddr-srcAddr-protocol-dstPort-srcPort", or an empty string if
 *         the key could not be parsed
 */
public static String reverseKey(String key) {
  Assert.hasText(key, "key must not be null or empty");
  String delimeter = HBaseConfigConstants.PCAP_KEY_DELIMETER;
  String regex = "\\" + delimeter;
  // Method-local buffer: StringBuilder avoids StringBuffer's needless locking.
  StringBuilder sb = new StringBuilder();
  try {
    String[] tokens = key.split(regex);
    Assert
        .isTrue(
            (tokens.length == 5 || tokens.length == 6 || tokens.length == 7),
            "key is not in the format : 'srcAddr-dstAddr-protocol-srcPort-dstPort-{ipId-fragment identifier}'");
    // Swap src/dst address and src/dst port; protocol stays in the middle.
    sb.append(tokens[1]).append(delimeter).append(tokens[0])
        .append(delimeter).append(tokens[2]).append(delimeter)
        .append(tokens[4]).append(delimeter).append(tokens[3]);
  } catch (Exception e) {
    Log.warn("Failed to reverse the key. Reverse scan won't be performed.", e);
  }
  return sb.toString();
}
 
Example 14
Source Project: RDFS   Source File: FileUtil.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Change the permissions on a file / directory, recursively, if
 * needed.
 * @param filename name of the file whose permissions are to change
 * @param perm permission string
 * @param recursive true, if permissions should be changed recursively
 * @return the exit code from the command.
 * @throws IOException
 * @throws InterruptedException
 */
public static int chmod(String filename, String perm, boolean recursive)
                          throws IOException, InterruptedException {
  // Method-local buffer: StringBuilder avoids StringBuffer's needless locking.
  StringBuilder cmdBuf = new StringBuilder();
  cmdBuf.append("chmod ");
  if (recursive) {
    cmdBuf.append("-R ");
  }
  cmdBuf.append(perm).append(" ");
  cmdBuf.append(filename);
  // SECURITY NOTE(review): filename is interpolated into a "bash -c" command
  // unescaped; a filename containing shell metacharacters could run arbitrary
  // commands. Only trusted paths must ever be passed here.
  String[] shellCmd = {"bash", "-c", cmdBuf.toString()};
  ShellCommandExecutor shExec = new ShellCommandExecutor(shellCmd);
  try {
    shExec.execute();
  } catch (IOException e) {
    // Best effort: log and fall through so the caller still gets the exit code.
    if (Log.isDebugEnabled()) {
      Log.debug("Error while changing permission : " + filename
          + " Exception: " + StringUtils.stringifyException(e));
    }
  }
  return shExec.getExitCode();
}
 
Example 15
protected URL findWebXml() throws IOException {
    // An explicitly configured web.xml (eg for jetty:run) always wins.
    if (webXmlFile != null && webXmlFile.exists()) {
        return webXmlFile.toURI().toURL();
    }

    // Otherwise fall back to the standard WEB-INF lookup.
    Log.debug("Looking for web.xml file in WEB-INF");
    return super.findWebXml();
}
 
Example 16
protected URL findWebXml() throws IOException {
    // An explicitly configured web.xml (eg for jetty:run) always wins.
    if (webXmlFile != null && webXmlFile.exists()) {
        return webXmlFile.toURI().toURL();
    }

    // Otherwise fall back to the standard WEB-INF lookup.
    Log.debug("Looking for web.xml file in WEB-INF");
    return super.findWebXml();
}
 
Example 17
Source Project: PoseidonX   Source File: MyJsonRowDeserializationSchema.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public Row deserialize(byte[] message) throws IOException {
    try {
        JsonNode root = objectMapper.readTree(message);
        Row row = new Row(fieldNames.length);

        for (int i = 0; i < fieldNames.length; i++) {
            // Look the field up by its original name in the data source.
            JsonNode node = root.get(columnNames[i]);

            if (node == null) {
                if (failOnMissingField) {
                    throw new IllegalStateException("Failed to find field with name '"
                            + fieldNames[i] + "'.");
                }
                row.setField(i, null);
                continue;
            }

            // Read the value as the declared field type; fall back to null
            // (with a warning) when the conversion fails.
            try {
                row.setField(i, objectMapper.treeToValue(node, fieldTypes[i].getTypeClass()));
            } catch (Exception e) {
                Log.warn("Failed to deserialize JSON object.[" + new String(message, "UTF-8") + "]", e);
                row.setField(i, null);
            }
        }

        return row;
    } catch (Throwable t) {
        Log.warn("Failed to deserialize JSON object.[" + new String(message, "UTF-8") + "]", t);
        return null;
    }
}
 
Example 18
Source Project: hadoop   Source File: TestGreedyReservationAgent.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds the shared test fixture: an in-memory reservation plan with
 * effectively unconstrained capacity quotas and a greedy reservation agent.
 */
@Before
public void setup() throws Exception {

  // Log the random seed so a failing run can be reproduced.
  long seed = rand.nextLong();
  rand.setSeed(seed);
  Log.info("Running with seed: " + seed);

  // setting completely loose quotas
  long timeWindow = 1000000L;
  Resource clusterCapacity = Resource.newInstance(100 * 1024, 100, 100);
  step = 1000L;
  ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil();
  String reservationQ = testUtil.getFullReservationQueueName();

  // 100% instantaneous and average constraints == no practical limit.
  float instConstraint = 100;
  float avgConstraint = 100;

  ReservationSchedulerConfiguration conf =
      ReservationSystemTestUtil.createConf(reservationQ, timeWindow,
          instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.init(reservationQ, conf);
  agent = new GreedyReservationAgent();

  // Metrics are irrelevant to these tests; a mock is sufficient.
  QueueMetrics queueMetrics = mock(QueueMetrics.class);

  plan = new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity, step,
      res, minAlloc, maxAlloc, "dedicated", null, true);
}
 
Example 19
Source Project: hadoop   Source File: MockNM.java    License: Apache License 2.0 5 votes vote down vote up
public NodeHeartbeatResponse nodeHeartbeat(ApplicationAttemptId attemptId,
    long containerId, ContainerState containerState) throws Exception {
  // Build the single container status to report.
  ContainerStatus status = BuilderUtils.newContainerStatus(
      BuilderUtils.newContainerId(attemptId, containerId), containerState,
      "Success", 0);
  Log.info("ContainerStatus: " + status);

  // Wrap it in the map shape the bulk heartbeat overload expects.
  ArrayList<ContainerStatus> statuses = new ArrayList<ContainerStatus>(1);
  statuses.add(status);
  HashMap<ApplicationId, List<ContainerStatus>> nodeUpdate =
      new HashMap<ApplicationId, List<ContainerStatus>>(1);
  nodeUpdate.put(attemptId.getApplicationId(), statuses);
  return nodeHeartbeat(nodeUpdate, true);
}
 
Example 20
Source Project: hadoop   Source File: MockNM.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Sends one heartbeat carrying the given container statuses and health flag,
 * then adopts any rolled master keys returned by the RM.
 *
 * @param conts container statuses to report, keyed by application
 * @param isHealthy whether this mock NM reports itself healthy
 * @param resId response id to echo in the node status
 * @return the RM's heartbeat response
 */
public NodeHeartbeatResponse nodeHeartbeat(Map<ApplicationId,
    List<ContainerStatus>> conts, boolean isHealthy, int resId) throws Exception {
  NodeHeartbeatRequest req = Records.newRecord(NodeHeartbeatRequest.class);
  NodeStatus status = Records.newRecord(NodeStatus.class);
  status.setResponseId(resId);
  status.setNodeId(nodeId);
  for (Map.Entry<ApplicationId, List<ContainerStatus>> entry : conts.entrySet()) {
    Log.info("entry.getValue() " + entry.getValue());
    // NOTE(review): setContainersStatuses appears to replace the list on each
    // iteration, so only the last map entry's statuses would survive when
    // conts holds several applications — confirm this is intended for the mock.
    status.setContainersStatuses(entry.getValue());
  }
  NodeHealthStatus healthStatus = Records.newRecord(NodeHealthStatus.class);
  healthStatus.setHealthReport("");
  healthStatus.setIsNodeHealthy(isHealthy);
  healthStatus.setLastHealthReportTime(1);
  status.setNodeHealthStatus(healthStatus);
  req.setNodeStatus(status);
  req.setLastKnownContainerTokenMasterKey(this.currentContainerTokenMasterKey);
  req.setLastKnownNMTokenMasterKey(this.currentNMTokenMasterKey);
  NodeHeartbeatResponse heartbeatResponse =
      resourceTracker.nodeHeartbeat(req);
  
  // Adopt a rolled container-token master key if the RM sent a new one.
  MasterKey masterKeyFromRM = heartbeatResponse.getContainerTokenMasterKey();
  if (masterKeyFromRM != null
      && masterKeyFromRM.getKeyId() != this.currentContainerTokenMasterKey
          .getKeyId()) {
    this.currentContainerTokenMasterKey = masterKeyFromRM;
  }

  // Same for the NM-token master key.
  masterKeyFromRM = heartbeatResponse.getNMTokenMasterKey();
  if (masterKeyFromRM != null
      && masterKeyFromRM.getKeyId() != this.currentNMTokenMasterKey
          .getKeyId()) {
    this.currentNMTokenMasterKey = masterKeyFromRM;
  }
  
  return heartbeatResponse;
}
 
Example 21
Source Project: hadoop   Source File: JobEndNotifier.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Notify a server of the completion of a submitted job. The user must have
 * configured MRJobConfig.MR_JOB_END_NOTIFICATION_URL
 * @param jobReport JobReport used to read JobId and JobStatus
 * @throws InterruptedException
 */
public void notify(JobReport jobReport)
  throws InterruptedException {
  // Do we need job-end notification?
  if (userUrl == null) {
    Log.info("Job end notification URL not set, skipping.");
    return;
  }

  //Do string replacements for jobId and jobStatus
  if (userUrl.contains(JOB_ID)) {
    userUrl = userUrl.replace(JOB_ID, jobReport.getJobId().toString());
  }
  if (userUrl.contains(JOB_STATUS)) {
    userUrl = userUrl.replace(JOB_STATUS, jobReport.getJobState().toString());
  }

  // Create the URL, ensure sanity
  try {
    urlToNotify = new URL(userUrl);
  } catch (MalformedURLException mue) {
    // Best effort only: a bad URL is logged and the notification dropped.
    Log.warn("Job end notification couldn't parse " + userUrl, mue);
    return;
  }

  // Send notification, retrying with a fixed wait between attempts until one
  // succeeds or the attempt budget (numTries) is exhausted.
  boolean success = false;
  while (numTries-- > 0 && !success) {
    Log.info("Job end notification attempts left " + numTries);
    success = notifyURLOnce();
    if (!success) {
      Thread.sleep(waitInterval);
    }
  }
  if (!success) {
    Log.warn("Job end notification failed to notify : " + urlToNotify);
  } else {
    Log.info("Job end notification succeeded for " + jobReport.getJobId());
  }
}
 
Example 22
Source Project: hadoop   Source File: INodeReference.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps,
    final int snapshot, int prior, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  // since WithName node resides in deleted list acting as a snapshot copy,
  // the parameter snapshot must be non-null
  Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID);
  // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the
  // previous WithName instance
  if (prior == Snapshot.NO_SNAPSHOT_ID) {
    prior = getPriorSnapshot(this);
  }
  
  // Nothing to clean when the snapshot being deleted is not newer than prior.
  if (prior != Snapshot.NO_SNAPSHOT_ID
      && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) {
    return new QuotaCounts.Builder().build();
  }

  // Delegate the actual cleanup to the referred inode, then propagate the
  // freed space up through the parent reference (if any).
  QuotaCounts counts = getReferredINode().cleanSubtree(bsps, snapshot, prior,
      collectedBlocks, removedINodes);
  INodeReference ref = getReferredINode().getParentReference();
  if (ref != null) {
    try {
      ref.addSpaceConsumed(counts.negation(), true);
    } catch (QuotaExceededException e) {
      // Releasing space should never trip a quota; reaching here is a bug.
      Log.warn("Should not have QuotaExceededException");
    }
  }
  
  if (snapshot < lastSnapshotId) {
    // for a WithName node, when we compute its quota usage, we only count
    // in all the nodes existing at the time of the corresponding rename op.
    // Thus if we are deleting a snapshot before/at the snapshot associated 
    // with lastSnapshotId, we do not need to update the quota upwards.
    counts = new QuotaCounts.Builder().build();
  }
  return counts;
}
 
Example 23
Source Project: hadoop   Source File: ViewFileSystemTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a ViewFileSystem whose mount table links the test dir, home dir and
 * working dir of {@code fsTarget}, for use by the file system tests.
 *
 * @param fsTarget - the target fs of the view fs.
 * @return return the ViewFS File context to be used for tests
 * @throws Exception
 */
static public FileSystem setupForViewFileSystem(Configuration conf, FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  fsTarget.mkdirs(targetOfTests);


  // Set up viewfs link for test dir as described above
  String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
  
  
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");


  FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fsView.getWorkingDirectory());
  return fsView;
}
 
Example 24
Source Project: hadoop   Source File: ViewFileSystemTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
static void setUpHomeDir(Configuration conf, FileSystem fsTarget) {
  String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
  // A second slash means the home dir is nested below root; link its first
  // path component. Otherwise link the home dir itself.
  if (homeDir.indexOf('/', 1) > 0) {
    linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
  } else {
    URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
    ConfigUtil.addLink(conf, homeDir, linkTarget);
    Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
  }
  // Now set the root of the home dir for viewfs
  String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
  ConfigUtil.setHomeDirConf(conf, homeDirRoot);
  Log.info("Home dir base for viewfs" + homeDirRoot);
}
 
Example 25
Source Project: hadoop   Source File: ViewFileSystemTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) {
  // Cut the path at the end of its first component; on Windows skip one more
  // '/' (presumably to include the drive component — TODO confirm).
  int cut = path.indexOf('/', 1);
  if (Shell.WINDOWS) {
    cut = path.indexOf('/', cut + 1);
  }
  String firstComponent = path.substring(0, cut);
  URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
  ConfigUtil.addLink(conf, firstComponent, linkTarget);
  Log.info("Added link for " + info + " "
      + firstComponent + "->" + linkTarget);
}
 
Example 26
Source Project: hadoop   Source File: ViewFsTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a viewfs FileContext over the local file system, linking the test
 * dir, home dir and working dir into the mount table.
 *
 * @param helper supplies the test root path on the local fs
 * @return the viewfs FileContext to be used for tests
 */
static public FileContext setupForViewFsLocalFs(FileContextTestHelper helper) throws Exception {
  /**
   * create the test root on local_fs - the  mount table will point here
   */
  FileContext fsTarget = FileContext.getLocalFSFileContext();
  Path targetOfTests = helper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  
  fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
  Configuration conf = new Configuration();
  
  // Set up viewfs link for test dir as described above
  String testDir = helper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
  
  
  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);
    
  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");
  
  FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
  fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fc.getWorkingDirectory());
  //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
  //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
  return fc;
}
 
Example 27
Source Project: hadoop   Source File: ViewFsTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
  String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
  // A second slash means the home dir is nested below root; link its first
  // path component. Otherwise link the home dir itself.
  if (homeDir.indexOf('/', 1) > 0) {
    linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
  } else {
    URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
    ConfigUtil.addLink(conf, homeDir, linkTarget);
    Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
  }
  // Now set the root of the home dir for viewfs
  String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
  ConfigUtil.setHomeDirConf(conf, homeDirRoot);
  Log.info("Home dir base for viewfs" + homeDirRoot);
}
 
Example 28
Source Project: hadoop   Source File: ViewFsTestSetup.java    License: Apache License 2.0 5 votes vote down vote up
static void linkUpFirstComponents(Configuration conf, String path,
    FileContext fsTarget, String info) {
  // Cut the path at the end of its first component; on Windows skip one more
  // '/' (presumably to include the drive component — TODO confirm).
  int cut = path.indexOf('/', 1);
  if (Shell.WINDOWS) {
    cut = path.indexOf('/', cut + 1);
  }
  String firstComponent = path.substring(0, cut);
  URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
  ConfigUtil.addLink(conf, firstComponent, linkTarget);
  Log.info("Added link for " + info + " "
      + firstComponent + "->" + linkTarget);
}
 
Example 29
Source Project: scava   Source File: GitHubSearchAPICallback.java    License: Eclipse Public License 2.0 5 votes vote down vote up
/**
 * Records the result total for this search, capping it at {@code maxCount}
 * (and logging the cap) when the service reports more hits than we will fetch.
 */
@Override
public void handleTotal(Response<R> response) {
	Integer totalCount = response.body().getTotalCount();
	// Guard against a missing totalCount: the original unboxed it directly in
	// "totalCount > maxCount" and would NPE (the sibling overload already has
	// this null check).
	if (totalCount != null && totalCount > maxCount) {
		Log.info("Please note that for GitHub Search API requests, as totalCount (" + totalCount + ") > maxCount (" + maxCount
				+ "), only " + maxCount + " elements will be retrieved.");
		this.dataset.setTotal(maxCount);
	} else
		this.dataset.setTotal(totalCount);
}
 
Example 30
/**
 * Records the result total for this request, capping it at {@code maxCount}
 * (and logging the cap) when the service reports more hits than we will fetch.
 */
@Override
public void handleTotal(Response<R> response) {
	Integer totalCount = response.body().getTotalCount();
	boolean exceedsCap = totalCount != null && totalCount > maxCount;
	if (exceedsCap) {
		Log.info("Please note that for this request, totalCount (" + totalCount + ") > maxCount (" + maxCount
				+ "), as such, only " + maxCount + " elements will be retrieved.");
		this.dataset.setTotal(maxCount);
	} else {
		this.dataset.setTotal(totalCount);
	}
}