org.apache.hadoop.hdfs.NameNodeProxies Java Examples

The following examples show how to use org.apache.hadoop.hdfs.NameNodeProxies. They are drawn from several open-source projects; the source file and originating project are noted above each example.
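Before the individual examples, a minimal end-to-end sketch may help orient you. It exercises the two calls that recur throughout this page: createProxy(), which resolves HA vs. non-HA settings from the configuration and returns a ProxyAndInfo wrapper, and getProxy(), which yields the typed RPC interface. The NameNode address below is a placeholder, and the sketch is illustrative rather than authoritative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.ipc.RPC;

public class NameNodeProxiesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Placeholder address; point this at a real NameNode or a logical HA URI.
    URI nnUri = URI.create("hdfs://localhost:8020");

    // createProxy picks an HA or non-HA proxy based on the configuration
    // and wraps it in the appropriate retry policy.
    ProxyAndInfo<ClientProtocol> proxyAndInfo =
        NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class);
    ClientProtocol namenode = proxyAndInfo.getProxy();
    try {
      HdfsFileStatus status = namenode.getFileInfo("/");
      System.out.println("Root directory exists: " + (status != null));
    } finally {
      // The proxy holds an RPC connection; release it when done.
      RPC.stopProxy(namenode);
    }
  }
}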
Example #1
Source File: IPFailoverProxyProvider.java    From hadoop with Apache License 2.0
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-HA proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(), 
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
 
Example #2
Source File: DfsServlet.java    From hadoop with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // If we are running inside the NameNode, use it directly rather than
  // going through RPC.
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
 
Example #3
Source File: BackupNode.java    From hadoop with Apache License 2.0
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        LOG.warn("Encountered exception ", ie);
      }
    }
  }
  return nsInfo;
}
 
Example #4
Source File: EditLogBackupOutputStream.java    From hadoop with Apache License 2.0
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
                          JournalInfo journalInfo) // active name-node
throws IOException {
  super();
  this.bnRegistration = bnReg;
  this.journalInfo = journalInfo;
  InetSocketAddress bnAddress =
    NetUtils.createSocketAddr(bnRegistration.getAddress());
  try {
    this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
        bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy();
  } catch(IOException e) {
    Storage.LOG.error("Error connecting to: " + bnAddress, e);
    throw e;
  }
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
  this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
 
Example #5
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
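Outside of tests, the same failover-aware proxy is normally assembled with RetryProxy rather than a hand-built InvocationHandler. A minimal sketch of that counterpart, reusing the failoverProxyProvider from above (the two-argument failoverOnNetworkException overload is assumed here):

// Sketch of the non-test counterpart: RetryProxy wires the failover
// provider and the retry policy together instead of a hand-built handler.
ClientProtocol retryingProxy = (ClientProtocol) RetryProxy.create(
    ClientProtocol.class,
    failoverProxyProvider,
    RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, Integer.MAX_VALUE));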
 
Example #6
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal
  // for this call should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshUserToGroupsMappings for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshUserToGroupsMappings();
      System.out.println("Refresh user to groups mapping successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the user-to-groups mappings
    refreshProtocol.refreshUserToGroupsMappings();
    System.out.println("Refresh user to groups mapping successful");
  }
  
  return 0;
}
 
Example #7
Source File: UpstreamManager.java    From nnproxy with Apache License 2.0
public Upstream(ClientProtocol protocol,
                NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyAndInfo,
                NameNodeProxies.ProxyAndInfo<NamenodeProtocol> nnProxyAndInfo) {
    this.protocol = protocol;
    this.proxyAndInfo = proxyAndInfo;
    this.nnProxyAndInfo = nnProxyAndInfo;
}
 
Example #8
Source File: UpstreamManager.java    From nnproxy with Apache License 2.0
synchronized Upstream makeUpstream(UpstreamTicket ticket) throws IOException {
    if (ticket.user != null) {
        UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser(ticket.user,
                SaslRpcServer.AuthMethod.SIMPLE));
    } else {
        UserGroupInformation.setLoginUser(null);
    }
    URI fsUri = URI.create(ticket.fs);
    NameNodeProxies.ProxyAndInfo proxyAndInfo = NameNodeProxies.createProxy(conf, fsUri, ClientProtocol.class);
    NameNodeProxies.ProxyAndInfo nnProxyAndInfo = NameNodeProxies.createProxy(conf, fsUri, NamenodeProtocol.class);
    LOG.info("New upstream: " + ticket.user + "@" + ticket.fs);
    ClientProtocol clientProtocol = (ClientProtocol) proxyAndInfo.getProxy();
    return new Upstream(wrapWithThrottle(ticket.fs, clientProtocol, ClientProtocol.class), proxyAndInfo, nnProxyAndInfo);
}
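As a rough illustration of how the returned Upstream might be consumed, here is a hypothetical caller. The UpstreamTicket constructor arguments and the direct field access are assumptions for illustration, not taken from the nnproxy source:

// Hypothetical caller inside UpstreamManager; "hdfs://ns1" and "hadoop"
// are placeholder values.
Upstream upstream = makeUpstream(new UpstreamTicket("hdfs://ns1", "hadoop"));

// Ordinary client RPCs go through the throttled ClientProtocol wrapper...
HdfsFileStatus status = upstream.protocol.getFileInfo("/");

// ...while namenode-to-namenode calls use the NamenodeProtocol proxy.
NamespaceInfo nsInfo = upstream.nnProxyAndInfo.getProxy().versionRequest();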
 
Example #9
Source File: MiniNNProxy.java    From nnproxy with Apache License 2.0
public MiniNNProxy(Configuration conf, String mountTable, MiniDFSCluster[] clusters) throws Exception {
    super(conf);
    this.mountTable = mountTable;
    this.clusters = clusters;
    this.mounts = new MockedMountsManager();
    this.start();


    UserGroupInformation curr = UserGroupInformation.getCurrentUser();
    clientProtocol = NameNodeProxies.createNonHAProxy(conf,
            getRpcAddress(), ClientProtocol.class,
            curr, false).getProxy();
    dfs = new DFSClient(URI.create("hdfs://127.0.0.1:" + getRpcAddress().getPort()), conf);
    fs = FileSystem.newInstance(URI.create("hdfs://127.0.0.1:" + getRpcAddress().getPort()), conf, curr.getUserName());
}
 
Example #10
Source File: ConfiguredFailoverProxyProvider.java    From big-c with Apache License 2.0
/**
 * Lazily initialize the RPC proxy object.
 */
@Override
public synchronized ProxyInfo<T> getProxy() {
  AddressRpcProxyPair<T> current = proxies.get(currentProxyIndex);
  if (current.namenode == null) {
    try {
      current.namenode = NameNodeProxies.createNonHAProxy(conf,
          current.address, xface, ugi, false, fallbackToSimpleAuth).getProxy();
    } catch (IOException e) {
      LOG.error("Failed to create RPC proxy to NameNode", e);
      throw new RuntimeException(e);
    }
  }
  return new ProxyInfo<T>(current.namenode, current.address.toString());
}
 
Example #11
Source File: BootstrapStandby.java    From big-c with Apache License 2.0
private NamenodeProtocol createNNProtocolProxy()
    throws IOException {
  return NameNodeProxies.createNonHAProxy(getConf(),
      otherIpcAddr, NamenodeProtocol.class,
      UserGroupInformation.getLoginUser(), true)
      .getProxy();
}
 
Example #12
Source File: NameNodeConnector.java    From big-c with Apache License 2.0
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
                         List<Path> targetPaths, Configuration conf,
                         int maxNotChangedIterations)
    throws IOException {
  this.nameNodeUri = nameNodeUri;
  this.idPath = idPath;
  this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
      .asList(new Path("/")) : targetPaths;
  this.maxNotChangedIterations = maxNotChangedIterations;

  this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
      NamenodeProtocol.class).getProxy();
  this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
      ClientProtocol.class, fallbackToSimpleAuth).getProxy();
  this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);

  final NamespaceInfo namespaceinfo = namenode.versionRequest();
  this.blockpoolID = namespaceinfo.getBlockPoolID();

  final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
  this.keyManager = new KeyManager(blockpoolID, namenode,
      defaults.getEncryptDataTransfer(), conf);
  // If this is for a test, we do not create the id file.
  out = checkAndMarkRunning();
  if (out == null) {
    // Exit if there is another one running.
    throw new IOException("Another " + name + " is running.");
  }
}
 
Example #13
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal
  // for this call should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshServiceAcl for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshAuthorizationPolicyProtocol.class);
    for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
      proxy.getProxy().refreshServiceAcl();
      System.out.println("Refresh service acl successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshAuthorizationPolicyProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshAuthorizationPolicyProtocol.class).getProxy();
    // Refresh the authorization policy in-effect
    refreshProtocol.refreshServiceAcl();
    System.out.println("Refresh service acl successful");
  }
  
  return 0;
}
 
Example #14
Source File: ContextCommands.java    From hdfs-shell with Apache License 2.0
@PostConstruct
public void init() {
    try {
        final HdfsConfiguration conf = new HdfsConfiguration();
        userMappingsProtocol = NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
                GetUserMappingsProtocol.class).getProxy();
    } catch (Exception e) {
        logger.error("Failed to create proxy to get user groups", e);
    }
}
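Once the proxy is initialized, a group lookup is a single RPC. A minimal follow-up sketch, assuming the userMappingsProtocol field from init() above; the helper method and its placement are illustrative:

// Hypothetical helper: resolve a user's groups through the proxy.
public List<String> getGroupsForUser(String user) throws IOException {
    String[] groups = userMappingsProtocol.getGroupsForUser(user);
    return Arrays.asList(groups);
}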
 
Example #15
Source File: DFSAdmin.java    From big-c with Apache License 2.0
/**
 * Refresh the super-user groups configuration on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // For security authorization, the server principal
  // for this call should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshUserMappingsProtocol.class);
    for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
      proxy.getProxy().refreshSuperUserGroupsConfiguration();
      System.out.println("Refresh super user groups configuration " +
          "successful for " + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshUserMappingsProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshUserMappingsProtocol.class).getProxy();

    // Refresh the super-user groups configuration
    refreshProtocol.refreshSuperUserGroupsConfiguration();
    System.out.println("Refresh super user groups configuration successful");
  }

  return 0;
}
 
Example #16
Source File: DFSAdmin.java    From big-c with Apache License 2.0
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // For security authorization, the server principal
  // for this call should be the NameNode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Example #17
Source File: TestBalancerWithNodeGroup.java    From big-c with Apache License 2.0
/**
 * Create a 4-node cluster: 2 nodes (n0, n1) in RACK0/NODEGROUP0, 1 node (n2)
 * in RACK1/NODEGROUP1 and 1 node (n3) in RACK1/NODEGROUP2. Fill the cluster
 * to 60% full with 3 replicas, so that n2 and n3 hold a replica of every
 * block under the NodeGroup-aware replica placement policy. As a result,
 * n2 and n3 end up 80% full (60% x 4 / 3), and the NodeGroup-aware balancer
 * policy prevents any block from migrating from n2 or n3 to n0 or n1. We
 * therefore expect the balancer to finish within 5 iterations without
 * moving any blocks.
 */
@Test(timeout=60000)
public void testBalancerEndInNoMoveProgress() throws Exception {
  Configuration conf = createConf();
  long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY, CAPACITY};
  String[] racks = new String[]{RACK0, RACK0, RACK1, RACK1};
  String[] nodeGroups = new String[]{NODEGROUP0, NODEGROUP0, NODEGROUP1, NODEGROUP2};
  
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  assertEquals(numOfDatanodes, nodeGroups.length);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities);
  MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
  cluster = new MiniDFSClusterWithNodeGroup(builder);
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, 
        cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = TestBalancer.sum(capacities);
    // fill up the cluster to be 60% full
    long totalUsedSpace = totalCapacity * 6 / 10;
    TestBalancer.createFile(cluster, filePath, totalUsedSpace / 3, 
        (short) (3), 0);

    // run balancer which can finish in 5 iterations with no block movement.
    runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);

  } finally {
    cluster.shutdown();
  }
}
 
Example #18
Source File: TestBalancer.java    From big-c with Apache License 2.0
private ExtendedBlock[] generateBlocks(Configuration conf, long size,
    short numNodes) throws IOException, InterruptedException, TimeoutException {
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    short replicationFactor = (short)(numNodes-1);
    long fileLen = size/replicationFactor;
    createFile(cluster, filePath, fileLen, replicationFactor, 0);

    List<LocatedBlock> locatedBlocks = client
        .getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();

    int numOfBlocks = locatedBlocks.size();
    ExtendedBlock[] blocks = new ExtendedBlock[numOfBlocks];
    for(int i=0; i<numOfBlocks; i++) {
      ExtendedBlock b = locatedBlocks.get(i).getBlock();
      blocks[i] = new ExtendedBlock(b.getBlockPoolId(), b.getBlockId(), b
          .getNumBytes(), b.getGenerationStamp());
    }

    return blocks;
  } finally {
    cluster.shutdown();
  }
}
 
Example #19
Source File: TestBalancer.java    From big-c with Apache License 2.0
private void testUnevenDistribution(Configuration conf,
    long[] distribution, long[] capacities, String[] racks) throws Exception {
  int numDatanodes = distribution.length;
  if (capacities.length != numDatanodes || racks.length != numDatanodes) {
    throw new IllegalArgumentException("Array length is not the same");
  }

  // calculate total space that need to be filled
  final long totalUsedSpace = sum(distribution);

  // fill the cluster
  ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
      (short) numDatanodes);

  // redistribute blocks
  Block[][] blocksDN = distributeBlocks(
      blocks, (short)(numDatanodes-1), distribution);

  // restart the cluster: do NOT format the cluster
  conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
                                            .format(false)
                                            .racks(racks)
                                            .simulatedCapacities(capacities)
                                            .build();
  cluster.waitActive();
  client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
      ClientProtocol.class).getProxy();

  for(int i = 0; i < blocksDN.length; i++)
    cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);

  final long totalCapacity = sum(capacities);
  runBalancer(conf, totalUsedSpace, totalCapacity);
  cluster.shutdown();
}
 
Example #20
Source File: TestBalancer.java    From big-c with Apache License 2.0
private void testBalancerDefaultConstructor(Configuration conf,
    long[] capacities, String[] racks, long newCapacity, String newRack)
    throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster.Builder(conf)
                              .numDataNodes(capacities.length)
                              .racks(racks)
                              .simulatedCapacities(capacities)
                              .build();
  try {
    cluster.waitActive();
    client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
        ClientProtocol.class).getProxy();

    long totalCapacity = sum(capacities);

    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
        (short) numOfDatanodes, 0);
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
        new long[] { newCapacity });

    totalCapacity += newCapacity;

    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
 