Java Code Examples for org.apache.hadoop.hdfs.server.namenode.NameNode

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.NameNode. These examples are extracted from open source projects; each one lists the project, source file, and license it was taken from.
Example 1
Source Project: RDFS   Source File: TestHDFSServerPorts.java    License: Apache License 2.0
/**
 * Verify secondary name-node port usage.
 */
public void testSecondaryNodePorts() throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode();

    // bind http server to the same port as name-node
    Configuration conf2 = new Configuration(config);
    conf2.set("dfs.secondary.http.address", 
              config.get("dfs.http.address"));
    SecondaryNameNode.LOG.info("= Starting 1 on: " + 
                               conf2.get("dfs.secondary.http.address"));
    boolean started = canStartSecondaryNode(conf2);
    assertFalse(started); // should fail

    // bind http server to a different port
    conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
    SecondaryNameNode.LOG.info("= Starting 2 on: " + 
                               conf2.get("dfs.secondary.http.address"));
    started = canStartSecondaryNode(conf2);
    assertTrue(started); // should start now
  } finally {
    stopNameNode(nn);
  }
}
 
Example 2
Source Project: hadoop   Source File: TestHAStateTransitions.java    License: Apache License 2.0
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
 
Example 3
Source Project: hadoop   Source File: DelegationTokenSecretManager.java    License: Apache License 2.0
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
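  // Report serialization progress for this step of the SAVING_CHECKPOINT startup phase.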
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example 4
Source Project: big-c   Source File: DelegationTokenSecretManager.java    License: Apache License 2.0
/**
 * Private helper method to save the current delegation tokens in the fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Example 5
Source Project: big-c   Source File: IPFailoverProxyProvider.java    License: Apache License 2.0
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-ha proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(), 
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
 
Example 6
Source Project: RDFS   Source File: TestDecommission.java    License: Apache License 2.0
private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo node, boolean decommissioning) throws InterruptedException, IOException {
  // Do the stats check over 10 iterations
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getStats();

    // For decommissioning nodes, ensure capacity of the DN is no longer
    // counted. Only used space of the DN is counted in cluster capacity
    assertEquals(newStats[0], decommissioning ? node.getDfsUsed() : 
      node.getCapacity());

    // Ensure cluster used capacity is counted for both normal and
    // decommissioning nodes
    assertEquals(newStats[1], node.getDfsUsed());

    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(newStats[2], decommissioning ? 0 : node.getRemaining());

    // Ensure transceiver count is same as that DN
    assertEquals(fsn.getTotalLoad(), node.getXceiverCount());
    
    Thread.sleep(HEARTBEAT_INTERVAL * 1000); // Sleep heart beat interval
  }
}
 
Example 7
Source Project: RDFS   Source File: FastCopySetupUtil.java    License: Apache License 2.0
public void testFastCopyMultiple(boolean hardlink) throws Exception {
  // Create a source file.
  String src = "/testFastCopyMultipleSrc" + hardlink;
  generateRandomFile(fs, src, FILESIZE);
  String destination = "/testFastCopyMultipleDestination" + hardlink;
  FastCopy fastCopy = new FastCopy(conf);
  List<FastFileCopyRequest> requests = new ArrayList<FastFileCopyRequest>();
  for (int i = 0; i < COPIES; i++) {
    requests.add(new FastFileCopyRequest(src, destination + i, fs, fs));
  }
  NameNode namenode = cluster.getNameNode();
  try {
    fastCopy.copy(requests);
    for (FastFileCopyRequest r : requests) {
      assertTrue(verifyCopiedFile(r.getSrc(), r.getDestination(), namenode,
          namenode, fs, fs, hardlink));
      verifyFileStatus(r.getDestination(), namenode, fastCopy);
    }
  } catch (Exception e) {
    LOG.error("Fast Copy failed with exception : ", e);
    fail("Fast Copy failed");
  } finally {
    fastCopy.shutdown();
  }
  assertTrue(pass);
}
 
Example 8
Source Project: RDFS   Source File: NameNodeMetrics.java    License: Apache License 2.0
public NameNodeMetrics(Configuration conf, NameNode nameNode) {
  String sessionId = conf.get("session.id");
  // Initiate Java VM metrics
  JvmMetrics.init("NameNode", sessionId);

  // Now the MBean for the name node - this also registers the MBean
  namenodeActivityMBean = new NameNodeActivtyMBean(registry);
  
  // Create a record for NameNode metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "namenode");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  log.info("Initializing NameNodeMeterics using context object:" +
            metricsContext.getClass().getName());
}
 
Example 9
Source Project: hadoop   Source File: BootstrapStandby.java    License: Apache License 2.0
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
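  // Fails fast if the configuration forbids formatting this NameNode.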
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
 
Example 10
Source Project: hadoop   Source File: DatanodeManager.java    License: Apache License 2.0
/**
 * Remove a datanode
 * @throws UnregisteredNodeException 
 */
public void removeDatanode(final DatanodeID node
    ) throws UnregisteredNodeException {
  namesystem.writeLock();
  try {
    final DatanodeDescriptor descriptor = getDatanode(node);
    if (descriptor != null) {
      removeDatanode(descriptor);
    } else {
      NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
                                   + node + " does not exist");
    }
  } finally {
    namesystem.writeUnlock();
  }
}
 
Example 11
Source Project: hadoop   Source File: DatanodeManager.java    License: Apache License 2.0
/** Remove a dead datanode. */
void removeDeadDatanode(final DatanodeID nodeID) {
  synchronized (datanodeMap) {
    DatanodeDescriptor d;
    try {
      d = getDatanode(nodeID);
    } catch (IOException e) {
      d = null;
    }
    if (d != null && isDatanodeDead(d)) {
      NameNode.stateChangeLog.info(
          "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
      removeDatanode(d);
    }
  }
}
 
Example 12
Source Project: RDFS   Source File: TestHDFSServerPorts.java    License: Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
 
Example 13
Source Project: big-c   Source File: TestDFSShellGenericOptions.java    License: Apache License 2.0
private void execute(String [] args, String namenode) {
  FsShell shell = new FsShell();
  FileSystem fs = null;
  try {
    ToolRunner.run(shell, args);
    fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
        shell.getConf());
    assertTrue("Directory does not get created", 
               fs.isDirectory(new Path("/data")));
    fs.delete(new Path("/data"), true);
  } catch (Exception e) {
    System.err.println(e.getMessage());
    e.printStackTrace();
  } finally {
    if (fs != null) {
      try {
        fs.close();
      } catch (IOException ignored) {
      }
    }
  }
}
 
Example 14
Source Project: RDFS   Source File: MiniDFSCluster.java    License: Apache License 2.0
/**
 * Returns true if the NameNode is running and is out of Safe Mode
 * or if waiting for safe mode is disabled.
 */
public boolean isNameNodeUp(int nnIndex) {
  NameNode nn = nameNodes[nnIndex].nameNode;
  if (nn == null) {
    return false;
  }
  try {
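    // getStats()[0] is total cluster capacity; it remains 0 until at least one datanode registers.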
    long[] sizes = nn.getStats();
    boolean isUp = false;
    synchronized (this) {
      isUp = ((!nn.isInSafeMode() || !waitSafeMode) && sizes[0] != 0);
    }
    return isUp;
  } catch (IOException ie) {
    return false;
  }
}
 
Example 15
Source Project: RDFS   Source File: AvatarDataNode.java    License: Apache License 2.0
private static List<InetSocketAddress> getDatanodeProtocolAddresses(
    Configuration conf, Collection<String> serviceIds) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = conf.get(FileSystem.FS_DEFAULT_NAME_KEY);
    if (defaultAddress != null) {
      Configuration newConf = new Configuration(conf);
      newConf.set(FileSystem.FS_DEFAULT_NAME_KEY, defaultAddress);
      defaultAddress = NameNode.getHostPortString(NameNode.getAddress(newConf));
    }
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }
  
  List<InetSocketAddress> addressList = DFSUtil.getAddresses(conf,
      serviceIds, defaultAddress,
      NameNode.DATANODE_PROTOCOL_ADDRESS,
      FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList == null) {
    throw new IOException("Incorrect configuration: namenode address "
        + FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
 
Example 16
Source Project: hadoop   Source File: TestDFSClientFailover.java    License: Apache License 2.0
/**
 * Make sure that client failover works when an active NN dies and the standby
 * takes over.
 */
@Test
public void testDfsClientFailover() throws IOException, URISyntaxException {
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  
  DFSTestUtil.createFile(fs, TEST_FILE,
      FILE_LENGTH_TO_VERIFY, (short)1, 1L);
  
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  cluster.shutdownNameNode(0);
  cluster.transitionToActive(1);
  assertEquals(fs.getFileStatus(TEST_FILE).getLen(), FILE_LENGTH_TO_VERIFY);
  
  // Check that it functions even if the URL becomes canonicalized
  // to include a port number.
  Path withPort = new Path("hdfs://" +
      HATestUtil.getLogicalHostname(cluster) + ":" +
      NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
  FileSystem fs2 = withPort.getFileSystem(fs.getConf());
  assertTrue(fs2.exists(withPort));

  fs.close();
}
 
Example 17
Source Project: hadoop   Source File: TestDFSHAAdminMiniCluster.java    License: Apache License 2.0
@Test 
public void testStateTransition() throws Exception {
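  // Both NameNodes in the mini cluster boot into standby state.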
  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn1"));
  assertFalse(nnode1.isStandbyState());       
  assertEquals(0, runTool("-transitionToStandby", "nn1"));
  assertTrue(nnode1.isStandbyState());
  
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());
  assertEquals(0, runTool("-transitionToActive", "nn2"));
  assertFalse(nnode2.isStandbyState());
  assertEquals(0, runTool("-transitionToStandby", "nn2"));
  assertTrue(nnode2.isStandbyState());
}
 
Example 18
Source Project: hadoop   Source File: TestFileSystem.java    License: Apache License 2.0
public void testFsCache() throws Exception {
  {
    long now = System.currentTimeMillis();
    String[] users = new String[]{"foo","bar"};
    final Configuration conf = new Configuration();
    FileSystem[] fs = new FileSystem[users.length];

    for(int i = 0; i < users.length; i++) {
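      // Each distinct UGI should map to its own cached FileSystem instance.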
      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
      fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
        }
      });
      for(int j = 0; j < i; j++) {
        assertFalse(fs[j] == fs[i]);
      }
    }
    FileSystem.closeAll();
  }
  
  {
    try {
      runTestCache(NameNode.DEFAULT_PORT);
    } catch(java.net.BindException be) {
      LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
          + NameNode.DEFAULT_PORT + ")", be);
    }

    runTestCache(0);
  }
}
 
Example 19
Source Project: hadoop-gpu   Source File: TestDefaultNameNodePort.java    License: Apache License 2.0
public void testGetAddressFromConf() throws Exception {
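  // A URI without an explicit port should resolve to NameNode.DEFAULT_PORT.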
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://foo/");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
  FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
  assertEquals(NameNode.getAddress(conf).getPort(), 555);
  FileSystem.setDefaultUri(conf, "foo");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
}
 
Example 20
Source Project: big-c   Source File: TestMRCredentials.java    License: Apache License 2.0
/**
 * run a distributed job and verify that TokenCache is available
 * @throws IOException
 */
@Test
public void test () throws IOException {

  // make sure JT starts
  Configuration jobConf =  new JobConf(mrCluster.getConfig());

  // provide namenodes names for the job to get the delegation tokens for
  //String nnUri = dfsCluster.getNameNode().getUri(namenode).toString();
  NameNode nn = dfsCluster.getNameNode();
  URI nnUri = NameNode.getUri(nn.getNameNodeAddress());
  jobConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString());

  jobConf.set("mapreduce.job.credentials.json", "keys.json");

  // using argument to pass the file name
  String[] args = {
      "-m", "1", "-r", "1", "-mt", "1", "-rt", "1"
  };

  int res = -1;
  try {
    res = ToolRunner.run(jobConf, new CredentialsTestJob(), args);
  } catch (Exception e) {
    System.out.println("Job failed with" + e.getLocalizedMessage());
    e.printStackTrace(System.out);
    fail("Job failed");
  }
  assertEquals("dist job res is not 0", res, 0);

}
 
Example 21
Source Project: hadoop-gpu   Source File: TestDefaultNameNodePort.java    License: Apache License 2.0
public void testGetAddressFromString() throws Exception {
  assertEquals(NameNode.getAddress("foo").getPort(),
               NameNode.DEFAULT_PORT);
  assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
               NameNode.DEFAULT_PORT);
  assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
               555);
  assertEquals(NameNode.getAddress("foo:555").getPort(),
               555);
}
 
Example 22
Source Project: big-c   Source File: TestFiPipelines.java    License: Apache License 2.0
private static void initLoggers() {
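  // Raise every logger involved in the write pipeline to ALL for fault-injection debugging.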
  ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
  ((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
 
Example 23
Source Project: hadoop   Source File: NameNodeProxies.java    License: Apache License 2.0
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending upon
 * if the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 **/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
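
For reference, here is a minimal sketch of how this factory might be invoked; the logical URI, the choice of ClientProtocol as the proxied interface, and the fallback flag handling are illustrative assumptions rather than part of the example above:

Configuration conf = new HdfsConfiguration();
URI nnUri = URI.create("hdfs://mycluster"); // hypothetical logical nameservice URI
AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyAndInfo =
    NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class, fallbackToSimpleAuth);
ClientProtocol namenode = proxyAndInfo.getProxy(); // ready for NameNode RPC calls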
 
Example 24
Source Project: big-c   Source File: TestInitializeSharedEdits.java    License: Apache License 2.0
@Test
public void testInitializeSharedEditsConfiguresGenericConfKeys() throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
      "ns1"), "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "ns1", "nn1"), "localhost:1234");
  assertNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
  NameNode.initializeSharedEdits(conf);
  assertNotNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
}
 
Example 25
Source Project: hadoop   Source File: TestDNFencing.java    License: Apache License 2.0
private void doMetasave(NameNode nn2) {
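  // The test holds the namesystem write lock while metaSave dumps block state to stderr.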
  nn2.getNamesystem().writeLock();
  try {
    PrintWriter pw = new PrintWriter(System.err);
    nn2.getNamesystem().getBlockManager().metaSave(pw);
    pw.flush();
  } finally {
    nn2.getNamesystem().writeUnlock();
  }
}
 
Example 26
Source Project: hadoop   Source File: TestStandbyIsHot.java    License: Apache License 2.0
static void waitForBlockLocations(final MiniDFSCluster cluster,
    final NameNode nn,
    final String path, final int expectedReplicas)
    throws Exception {
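  // Poll every 500 ms, for up to 20 seconds, until the block reports exactly the expected number of replicas.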
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    
    @Override
    public Boolean get() {
      try {
        LocatedBlocks locs = NameNodeAdapter.getBlockLocations(nn, path, 0, 1000);
        DatanodeInfo[] dnis = locs.getLastLocatedBlock().getLocations();
        for (DatanodeInfo dni : dnis) {
          Assert.assertNotNull(dni);
        }
        int numReplicas = dnis.length;
        
        LOG.info("Got " + numReplicas + " locs: " + locs);
        if (numReplicas > expectedReplicas) {
          cluster.triggerDeletionReports();
        }
        cluster.triggerHeartbeats();
        return numReplicas == expectedReplicas;
      } catch (IOException e) {
        LOG.warn("No block locations yet: " + e.getMessage());
        return false;
      }
    }
  }, 500, 20000);
}