Java Code Examples for org.apache.hadoop.security.UserGroupInformation#doAs()

The following examples show how to use org.apache.hadoop.security.UserGroupInformation#doAs(). doAs() runs a PrivilegedAction or PrivilegedExceptionAction as the user represented by the UserGroupInformation instance, using that user's credentials (for example a Kerberos login or delegation tokens), and is the standard way in Hadoop to perform work as a login user, proxy user, or remote user. The examples below are taken from open-source projects; the source file and originating project are listed above each one.
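Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: obtain a UserGroupInformation and run the privileged work inside doAs(). The user name and path are placeholders, and createRemoteUser() merely stands in for whatever login method a real application would use (keytab login, proxy user, token-carrying remote user, and so on), so treat this as an illustration rather than a drop-in implementation.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder user: real code would obtain the UGI from a keytab login,
    // createProxyUser(), or an incoming token, as the examples below show.
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("someUser");

    // Everything inside run() executes with the credentials carried by this UGI.
    boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        return fs.exists(new Path("/tmp")); // placeholder path
      }
    });
    System.out.println("/tmp exists: " + exists);
  }
}

doAs() rethrows an IOException or InterruptedException raised inside run() and wraps other checked exceptions in an UndeclaredThrowableException, which is why the examples below either declare those two exception types or catch them around the call.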
Example 1
Source File: HivePurgerPublisher.java    From incubator-gobblin with Apache License 2.0
public void initHiveMetastoreClient() throws Exception {
  if (this.state.contains(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION)) {
    String superUser = this.state.getProp(ComplianceConfigurationKeys.GOBBLIN_COMPLIANCE_SUPER_USER);
    String realm = this.state.getProp(ConfigurationKeys.KERBEROS_REALM);
    String keytabLocation = this.state.getProp(ConfigurationKeys.SUPER_USER_KEY_TAB_LOCATION);
    log.info("Establishing MetastoreClient connection using " + keytabLocation);

    UserGroupInformation.loginUserFromKeytab(HostUtils.getPrincipalUsingHostname(superUser, realm), keytabLocation);
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    loginUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws TException {
        HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
        return null;
      }
    });
  } else {
    HivePurgerPublisher.this.client = new HiveMetaStoreClient(new HiveConf());
  }
}
 
Example 2
Source File: HBaseRangerAuthorizationTest.java    From ranger with Apache License 2.0
@Test
public void testWriteRowAsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "IT";

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("temp"));
            
            // Add a new row
            Put put = new Put(Bytes.toBytes("row3"));
            put.addColumn(Bytes.toBytes("colfam1"), Bytes.toBytes("col1"), Bytes.toBytes("val2"));
            table.put(put);
            
            conn.close();
            return null;
        }
    });
}
 
Example 3
Source File: ImpersonationUtil.java    From dremio-oss with Apache License 2.0
private static FileSystem createFileSystem(UserGroupInformation proxyUserUgi, final Configuration fsConf,
                                           final Path path) {
  FileSystem fs;
  try {
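    // Create the FileSystem inside doAs() so it is bound to the proxy user's credentials instead of the process user's.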
    fs = proxyUserUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        logger.trace("Creating FileSystemWrapper for proxy user: " + UserGroupInformation.getCurrentUser());
        return HadoopFileSystem.get(path, fsConf);
      }
    });
  } catch (InterruptedException | IOException e) {
    final String errMsg = "Failed to create FileSystemWrapper for proxy user: " + e.getMessage();
    logger.error(errMsg, e);
    throw new RuntimeException(errMsg, e);
  }

  return fs;
}
 
Example 4
Source File: BlockReaderLocalLegacy.java    From big-c with Apache License 2.0
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
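      // Create the DataNode RPC proxy under the caller's UGI so the connection is authenticated as that user.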
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
 
Example 5
Source File: TeradataToHdfsJobRunnerMain.java    From azkaban-plugins with Apache License 2.0
public void run() throws IOException, InterruptedException {
  String jobName = System.getenv(AbstractProcessJob.JOB_NAME_ENV);
  _logger.info("Running job " + jobName);

  if (HadoopSecureWrapperUtils.shouldProxy(_jobProps)) {
    String tokenFile = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
    UserGroupInformation proxyUser =
        HadoopSecureWrapperUtils.setupProxyUser(_jobProps, tokenFile, _logger);

    proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        runCopyTdToHdfs();
        return null;
      }
    });
  } else {
    runCopyTdToHdfs();
  }
}
 
Example 6
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
/**
 *  @return a {@link HftpFileSystem} object as specified user. 
 */
public HftpFileSystem getHftpFileSystemAs(final String username,
    final Configuration conf, final int nnIndex, final String... groups)
    throws IOException, InterruptedException {
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      username, groups);
  return ugi.doAs(new PrivilegedExceptionAction<HftpFileSystem>() {
    @Override
    public HftpFileSystem run() throws Exception {
      return getHftpFileSystem(nnIndex);
    }
  });
}
 
Example 7
Source File: TestShadeSaslAuthenticationProvider.java    From hbase with Apache License 2.0
@Before
public void createTable() throws Exception {
  tableName = TableName.valueOf(name.getMethodName());

  // Create a table and write a record as the service user (hbase)
  UserGroupInformation serviceUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      "hbase/localhost", KEYTAB_FILE.getAbsolutePath());
  clusterId = serviceUgi.doAs(new PrivilegedExceptionAction<String>() {
    @Override public String run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(CONF);
          Admin admin = conn.getAdmin();) {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(tableName)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
            .build());

        UTIL.waitTableAvailable(tableName);

        try (Table t = conn.getTable(tableName)) {
          Put p = new Put(Bytes.toBytes("r1"));
          p.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("1"));
          t.put(p);
        }

        return admin.getClusterMetrics().getClusterId();
      }
    }
  });

  assertNotNull(clusterId);
}
 
Example 8
Source File: AbstractHdfsConnector.java    From pulsar with Apache License 2.0
protected FileSystem getFileSystemAsUser(final Configuration config, UserGroupInformation ugi) throws IOException {
    try {
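        // Lambda form of doAs(): PrivilegedExceptionAction has a single abstract method, so a lambda can stand in for the anonymous class.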
        return ugi.doAs((PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(config));
    } catch (InterruptedException e) {
        throw new IOException("Unable to create file system: " + e.getMessage());
    }
}
 
Example 9
Source File: HBaseRangerAuthorizationTest.java    From ranger with Apache License 2.0
@Test
public void testWriteRowInColFam2AsGroupIT() throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost");
    conf.set("hbase.zookeeper.property.clientPort", "" + port);
    conf.set("zookeeper.znode.parent", "/hbase-unsecure");

    String user = "IT";

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("temp"));
            
            // Add a new row
            try {
                Put put = new Put(Bytes.toBytes("row3"));
                put.addColumn(Bytes.toBytes("colfam2"), Bytes.toBytes("col1"), Bytes.toBytes("val2"));
                table.put(put);
                Assert.fail("Failure expected on an unauthorized user");
            } catch (IOException ex) {
                // expected
            }
            
            conn.close();
            return null;
        }
    });
}
 
Example 10
Source File: TestAMRMRPCResponseId.java    From big-c with Apache License 2.0
private AllocateResponse allocate(ApplicationAttemptId attemptId,
    final AllocateRequest req) throws Exception {
  UserGroupInformation ugi =
      UserGroupInformation.createRemoteUser(attemptId.toString());
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
      rm.getRMContext().getRMApps().get(attemptId.getApplicationId())
        .getRMAppAttempt(attemptId).getAMRMToken();
  ugi.addTokenIdentifier(token.decodeIdentifier());
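  // Issue allocate() as a remote user whose only credential is the AMRM token added above.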
  return ugi.doAs(new PrivilegedExceptionAction<AllocateResponse>() {
    @Override
    public AllocateResponse run() throws Exception {
      return amService.allocate(req);
    }
  });
}
 
Example 11
Source File: FileSystem.java    From hadoop with Apache License 2.0
/**
 * Get a filesystem instance based on the uri, the passed
 * configuration and the user
 * @param uri of the filesystem
 * @param conf the configuration to use
 * @param user to perform the get as
 * @return the filesystem instance
 * @throws IOException
 * @throws InterruptedException
 */
public static FileSystem get(final URI uri, final Configuration conf,
      final String user) throws IOException, InterruptedException {
  String ticketCachePath =
    conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
  UserGroupInformation ugi =
      UserGroupInformation.getBestUGI(ticketCachePath, user);
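  // Open the FileSystem as the selected UGI (from the Kerberos ticket cache if configured, otherwise the named user).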
  return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws IOException {
      return get(uri, conf);
    }
  });
}
 
Example 12
Source File: AMSimulator.java    From hadoop with Apache License 2.0
@Override
public void lastStep() throws Exception {
  LOG.info(MessageFormat.format("Application {0} is shutting down.", appId));
  // unregister tracking
  if (isTracked) {
    untrackApp();
  }
  // unregister application master
  final FinishApplicationMasterRequest finishAMRequest = recordFactory
                .newRecordInstance(FinishApplicationMasterRequest.class);
  finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);

  UserGroupInformation ugi =
      UserGroupInformation.createRemoteUser(appAttemptId.toString());
  Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps().get(appId)
      .getRMAppAttempt(appAttemptId).getAMRMToken();
  ugi.addTokenIdentifier(token.decodeIdentifier());
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      rm.getApplicationMasterService()
          .finishApplicationMaster(finishAMRequest);
      return null;
    }
  });

  simulateFinishTimeMS = System.currentTimeMillis() -
      SLSRunner.getRunner().getStartTimeMS();
  // record job running information
  ((ResourceSchedulerWrapper)rm.getResourceScheduler())
       .addAMRuntime(appId, 
                    traceStartTimeMS, traceFinishTimeMS, 
                    simulateStartTimeMS, simulateFinishTimeMS);
}
 
Example 13
Source File: KMS.java    From ranger with Apache License 2.0
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
    KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getCurrentVersion(@PathParam("name") final String name, @Context HttpServletRequest request)
    throws Exception {
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Entering getCurrentVersion method.");
    }
    UserGroupInformation user = HttpUserGroupInformation.get();
    checkNotEmpty(name, "name");
    KMSWebApp.getKeyCallsMeter().mark();
    assertAccess(Type.GET, user, KMSOp.GET_CURRENT_KEY, name, request.getRemoteAddr());
    LOG.debug("Getting key version for key with name {}.", name);
    KeyVersion keyVersion = user.doAs(new PrivilegedExceptionAction<KeyVersion>() {
      @Override
      public KeyVersion run() throws Exception {
        return provider.getCurrentKey(name);
      }
    });
    Object json = KMSUtil.toJSON(keyVersion);
    kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
    if (LOG.isDebugEnabled()) {
      LOG.debug("Exiting getCurrentVersion method.");
    }
    return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
  } catch (Exception e) {
    LOG.error("Exception in getCurrentVersion.", e);
    throw e;
  }
}
 
Example 14
Source File: TestMiniMRProxyUser.java    From big-c with Apache License 2.0
public void testValidProxyUser() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.createProxyUser("u1", UserGroupInformation.getLoginUser());
  ugi.doAs(new PrivilegedExceptionAction<Void>() {
      public Void run() throws Exception {
        mrRun();
        return null;
      }
  });
}
 
Example 15
Source File: TestClientToAMTokens.java    From hadoop with Apache License 2.0
private void verifyTamperedToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, UserGroupInformation ugi,
    ClientToAMTokenIdentifier maliciousID) {
  Token<ClientToAMTokenIdentifier> maliciousToken =
      new Token<ClientToAMTokenIdentifier>(maliciousID.getBytes(),
        token.getPassword(), token.getKind(),
        token.getService());
  ugi.addToken(maliciousToken);

  try {
    ugi.doAs(new PrivilegedExceptionAction<Void>()  {
      @Override
      public Void run() throws Exception {
        try {
          CustomProtocol client =
              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
                am.address, conf);
          client.ping();
          fail("Connection initiation with illegally modified "
              + "tokens is expected to fail.");
          return null;
        } catch (YarnException ex) {
          fail("Cannot get a YARN remote exception as "
              + "it will indicate RPC success");
          throw ex;
        }
      }
    });
  } catch (Exception e) {
    Assert.assertEquals(RemoteException.class.getName(), e.getClass()
        .getName());
    e = ((RemoteException)e).unwrapRemoteException();
    Assert
      .assertEquals(SaslException.class
        .getCanonicalName(), e.getClass().getCanonicalName());
    Assert.assertTrue(e
      .getMessage()
      .contains(
        "DIGEST-MD5: digest response format violation. "
            + "Mismatched response."));
    Assert.assertFalse(am.pinged);
  }
}
 
Example 16
Source File: SSLAndKerberosTest.java    From incubator-atlas with Apache License 2.0
@BeforeClass
public void setUp() throws Exception {
    jksPath = new Path(Files.createTempDirectory("tempproviders").toString(), "test.jks");
    providerUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file/" + jksPath.toUri();

    String persistDir = TestUtils.getTempDirectory();

    setupKDCAndPrincipals();
    setupCredentials();

    // client will actually only leverage subset of these properties
    final PropertiesConfiguration configuration = getSSLConfiguration(providerUrl);

    persistSSLClientConfiguration(configuration);

    TestUtils.writeConfiguration(configuration, persistDir + File.separator +
        ApplicationProperties.APPLICATION_PROPERTIES);

    String confLocation = System.getProperty("atlas.conf");
    URL url;
    if (confLocation == null) {
        url = SSLAndKerberosTest.class.getResource("/" + ApplicationProperties.APPLICATION_PROPERTIES);
    } else {
        url = new File(confLocation, ApplicationProperties.APPLICATION_PROPERTIES).toURI().toURL();
    }
    configuration.load(url);
    configuration.setProperty(TLS_ENABLED, true);
    configuration.setProperty("atlas.authentication.method.kerberos", "true");
    configuration.setProperty("atlas.authentication.keytab",userKeytabFile.getAbsolutePath());
    configuration.setProperty("atlas.authentication.principal","dgi/localhost@"+kdc.getRealm());

    configuration.setProperty("atlas.authentication.method.file", "false");
    configuration.setProperty("atlas.authentication.method.kerberos", "true");
    configuration.setProperty("atlas.authentication.method.kerberos.principal", "HTTP/localhost@" + kdc.getRealm());
    configuration.setProperty("atlas.authentication.method.kerberos.keytab", httpKeytabFile.getAbsolutePath());
    configuration.setProperty("atlas.authentication.method.kerberos.name.rules",
            "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT");

    configuration.setProperty("atlas.authentication.method.file", "true");
    configuration.setProperty("atlas.authentication.method.file.filename", persistDir
            + "/users-credentials");
    configuration.setProperty("atlas.auth.policy.file",persistDir
            + "/policy-store.txt" );

    TestUtils.writeConfiguration(configuration, persistDir + File.separator +
      "atlas-application.properties");

    setupUserCredential(persistDir);
    setUpPolicyStore(persistDir);

    subject = loginTestUser();
    UserGroupInformation.loginUserFromSubject(subject);
    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(
        "testUser",
        UserGroupInformation.getLoginUser());

    // save original setting
    originalConf = System.getProperty("atlas.conf");
    System.setProperty("atlas.conf", persistDir);

    originalHomeDir = System.getProperty("atlas.home");
    System.setProperty("atlas.home", TestUtils.getTargetDirectory());

    dgiCLient = proxyUser.doAs(new PrivilegedExceptionAction<AtlasClient>() {
        @Override
        public AtlasClient run() throws Exception {
            return new AtlasClient(configuration, DGI_URL);
        }
    });


    secureEmbeddedServer = new TestSecureEmbeddedServer(21443, getWarPath()) {
        @Override
        public PropertiesConfiguration getConfiguration() {
            return configuration;
        }
    };
    secureEmbeddedServer.getServer().start();
}
 
Example 17
Source File: ProxiedFileSystemWrapper.java    From incubator-gobblin with Apache License 2.0
/**
 * Getter for proxiedFs, using the passed parameters to create an instance of a proxiedFs.
 * @param properties
 * @param authType is either TOKEN or KEYTAB.
 * @param authPath is the KEYTAB location if the authType is KEYTAB; otherwise, it is the token file.
 * @param uri File system URI.
 * @throws IOException
 * @throws InterruptedException
 * @throws URISyntaxException
 * @return proxiedFs
 */
public FileSystem getProxiedFileSystem(State properties, AuthType authType, String authPath, String uri, final Configuration conf)
    throws IOException, InterruptedException, URISyntaxException {
  Preconditions.checkArgument(StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME)),
      "State does not contain a proper proxy user name");
  String proxyUserName = properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
  UserGroupInformation proxyUser;
  switch (authType) {
    case KEYTAB: // If the authentication type is KEYTAB, log in a super user first before creating a proxy user.
      Preconditions.checkArgument(
          StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS)),
          "State does not contain a proper proxy token file name");
      String superUser = properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
      UserGroupInformation.loginUserFromKeytab(superUser, authPath);
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      break;
    case TOKEN: // If the authentication type is TOKEN, create a proxy user and then add the token to the user.
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      Optional<Token<?>> proxyToken = getTokenFromSeqFile(authPath, proxyUserName);
      if (proxyToken.isPresent()) {
        proxyUser.addToken(proxyToken.get());
      } else {
        LOG.warn("No delegation token found for the current proxy user.");
      }
      break;
    default:
      LOG.warn("Creating a proxy user without authentication, which could not perform File system operations.");
      proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
      break;
  }

  final URI fsURI = URI.create(uri);
  proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws IOException {
      LOG.debug("Now performing file system operations as :" + UserGroupInformation.getCurrentUser());
      proxiedFs = FileSystem.get(fsURI, conf);
      return null;
    }
  });
  return this.proxiedFs;
}
 
Example 18
Source File: HiveMetastoreUtil.java    From datacollector with Apache License 2.0
/**
 * Serializes the Avro schema to HDFS and returns the path it was written to.
 * The file name includes a random UUID so each serialized schema gets a unique path.
 * @param schemaFolder Schema folder (if this starts with '/' it is treated as absolute)
 * @return HDFS path of the written schema file.
 */
public static String serializeSchemaToHDFS(
    UserGroupInformation loginUGI,
    final FileSystem fs,
    final String location,
    final String schemaFolder,
    final String databaseName,
    final String tableName,
    final String schemaJson
) throws StageException {
  String folderLocation;
  if (schemaFolder.startsWith(SEP)) {
    folderLocation = schemaFolder;
  } else {
    folderLocation = location + SEP + schemaFolder;
  }
  final Path schemasFolderPath = new Path(folderLocation);
  final String path =  folderLocation + SEP + String.format(
      AVRO_SCHEMA_FILE_FORMAT,
      databaseName,
      tableName,
      UUID.randomUUID().toString()
  );
  try {
    loginUGI.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception{
        if (!fs.exists(schemasFolderPath)) {
          fs.mkdirs(schemasFolderPath);
        }
        Path schemaFilePath = new Path(path);
        //This will never happen unless two HMS targets are writing, we will error out for this
        //and let user handle this via error record handling.
        if (!fs.exists(schemaFilePath)) {
          try (FSDataOutputStream os = fs.create(schemaFilePath)) {
            byte []schemaBytes = schemaJson.getBytes("UTF-8");
            os.write(schemaBytes, 0, schemaBytes.length);
          }
        } else {
          LOG.error(Utils.format("Already schema file {} exists in HDFS", path));
          throw new IOException("Already schema file exists");
        }
        return null;
      }
    });
  } catch (Exception e) {
    LOG.error("Error in Writing Schema to HDFS: " + e.toString(), e);
    throw new StageException(Errors.HIVE_18, path, e.getMessage());
  }
  return path;
}
 
Example 19
Source File: GetHDFSFileInfo.java    From nifi with Apache License 2.0
protected HDFSObjectInfoDetails walkHDFSTree(final ProcessContext context, final ProcessSession session, ExecutionContext executionContext,
         FlowFile origFF, final FileSystem hdfs, final UserGroupInformation ugi, final HDFSFileInfoRequest req, HDFSObjectInfoDetails parent,
         final boolean statsOnly
) throws Exception{

    final HDFSObjectInfoDetails p = parent;

    if (!ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> hdfs.exists(p != null ? p.getPath() : new Path(req.fullPath)))) {
            return null;
    }

    if (parent == null) {
        parent = new HDFSObjectInfoDetails(ugi.doAs((PrivilegedExceptionAction<FileStatus>) () -> hdfs.getFileStatus(new Path(req.fullPath))));
    }
    if (parent.isFile() && p == null) {
        //single file path requested and found, lets send to output:
        processHDFSObject(context, session, executionContext, origFF, req, parent, true);
        return parent;
    }

    final Path path = parent.getPath();

    FileStatus[] listFSt = null;
    try {
        listFSt = ugi.doAs((PrivilegedExceptionAction<FileStatus[]>) () -> hdfs.listStatus(path));
    }catch (IOException e) {
        parent.error = "Couldn't list directory: " + e;
        processHDFSObject(context, session, executionContext, origFF, req, parent, p == null);
        return parent; //File not found exception, or access denied - don't interrupt, just don't list
    }
    if (listFSt != null) {
        for (FileStatus f : listFSt) {
            HDFSObjectInfoDetails o = new HDFSObjectInfoDetails(f);
            HDFSObjectInfoDetails vo = validateMatchingPatterns(o, req);
            if (o.isDirectory() && !o.isSymlink() && req.isRecursive) {
                o = walkHDFSTree(context, session, executionContext, origFF, hdfs, ugi, req, o, vo == null || statsOnly);
                parent.countDirs += o.countDirs;
                parent.totalLen += o.totalLen;
                parent.countFiles += o.countFiles;
            }else if (o.isDirectory() && o.isSymlink()) {
                parent.countDirs += 1;
            }else if (o.isFile() && !o.isSymlink()) {
                parent.countFiles += 1;
                parent.totalLen += o.getLen();
            }else if (o.isFile() && o.isSymlink()) {
                parent.countFiles += 1; // do not add length of the symlink, as it doesn't consume space under THIS directory, but count files, as it is still an object.
            }

            // Decide what to do with child: if requested FF per object or per dir - just emit new FF with info in 'o' object
            if (vo != null && !statsOnly) {
                parent.addChild(vo);
                if (vo.isFile() && !vo.isSymlink()) {
                    processHDFSObject(context, session, executionContext, origFF, req, vo, false);
                }
            }
        }
        if (!statsOnly) {
            processHDFSObject(context, session, executionContext, origFF, req, parent, p==null);
        }
        if (req.groupping != Groupping.ALL) {
            parent.setChildren(null); //we need children in full tree only when single output requested.
        }
    }

    return parent;
}
 
Example 20
Source File: AMSimulator.java    From big-c with Apache License 2.0
private void submitApp()
        throws YarnException, InterruptedException, IOException {
  // ask for new application
  GetNewApplicationRequest newAppRequest =
      Records.newRecord(GetNewApplicationRequest.class);
  GetNewApplicationResponse newAppResponse = 
      rm.getClientRMService().getNewApplication(newAppRequest);
  appId = newAppResponse.getApplicationId();
  
  // submit the application
  final SubmitApplicationRequest subAppRequest =
      Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext appSubContext = 
      Records.newRecord(ApplicationSubmissionContext.class);
  appSubContext.setApplicationId(appId);
  appSubContext.setMaxAppAttempts(1);
  appSubContext.setQueue(queue);
  appSubContext.setPriority(Priority.newInstance(0));
  ContainerLaunchContext conLauContext = 
      Records.newRecord(ContainerLaunchContext.class);
  conLauContext.setApplicationACLs(
      new HashMap<ApplicationAccessType, String>());
  conLauContext.setCommands(new ArrayList<String>());
  conLauContext.setEnvironment(new HashMap<String, String>());
  conLauContext.setLocalResources(new HashMap<String, LocalResource>());
  conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
  appSubContext.setAMContainerSpec(conLauContext);
  appSubContext.setUnmanagedAM(true);
  subAppRequest.setApplicationSubmissionContext(appSubContext);
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
  ugi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws YarnException {
      rm.getClientRMService().submitApplication(subAppRequest);
      return null;
    }
  });
  LOG.info(MessageFormat.format("Submit a new application {0}", appId));
  
  // waiting until application ACCEPTED
  RMApp app = rm.getRMContext().getRMApps().get(appId);
  while(app.getState() != RMAppState.ACCEPTED) {
    Thread.sleep(10);
  }

  // Waiting until application attempt reach LAUNCHED
  // "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
  this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt().getAppAttemptId();
  RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
      .getCurrentAppAttempt();
  while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
    Thread.sleep(10);
  }
}