Java Code Examples for org.apache.hadoop.security.UserGroupInformation.doAs()

The following are Java code examples showing how to use doAs() of the org.apache.hadoop.security.UserGroupInformation class. You can vote up the examples you like; your votes help us surface more good examples.
+ Save this method
Example 1
Project: hadoop   File: TestClientRMTokens.java   View Source Code Vote up 6 votes
/**
 * Obtains an RM delegation token on behalf of {@code loggedInUser}.
 *
 * @param loggedInUser the user under whose identity the RPC is made
 * @param clientRMService client protocol handle to the ResourceManager
 * @param renewerString user name to record as the token's renewer
 * @return the RM delegation token issued for the given renewer
 * @throws IOException on RPC failure
 * @throws InterruptedException if the doAs action is interrupted
 */
private org.apache.hadoop.yarn.api.records.Token getDelegationToken(
    final UserGroupInformation loggedInUser,
    final ApplicationClientProtocol clientRMService, final String renewerString)
    throws IOException, InterruptedException {
  return loggedInUser.doAs(
      new PrivilegedExceptionAction<org.apache.hadoop.yarn.api.records.Token>() {
        @Override
        public org.apache.hadoop.yarn.api.records.Token run()
            throws YarnException, IOException {
          // Build and send the request inside run() so it executes as
          // loggedInUser, not as the test's process user.
          GetDelegationTokenRequest request =
              Records.newRecord(GetDelegationTokenRequest.class);
          request.setRenewer(renewerString);
          return clientRMService.getDelegationToken(request)
              .getRMDelegationToken();
        }
      });
}
 
Example 2
Project: hadoop   File: ClientCache.java   View Source Code Vote up 6 votes
/**
 * Creates an MRClientProtocol proxy to the JobHistoryServer configured at
 * {@link JHAdminConfig#MR_HISTORY_ADDRESS}, running the proxy creation as
 * the current user.
 *
 * @return the history server proxy, or {@code null} when no history server
 *         address is configured (callers must handle null)
 * @throws IOException if the current user cannot be determined
 */
protected MRClientProtocol instantiateHistoryProxy()
    throws IOException {
  final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
  if (StringUtils.isEmpty(serviceAddr)) {
    return null;
  }
  LOG.debug("Connecting to HistoryServer at: " + serviceAddr);
  final YarnRPC rpc = YarnRPC.create(conf);
  UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
  MRClientProtocol proxy =
      currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
        @Override
        public MRClientProtocol run() {
          return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
              NetUtils.createSocketAddr(serviceAddr), conf);
        }
      });
  // Fixed: this was previously logged before the proxy was created, which
  // misreported a connection that had not yet been attempted.
  LOG.debug("Connected to HistoryServer at: " + serviceAddr);
  return proxy;
}
 
Example 3
Project: hadoop   File: TestClientToAMTokens.java   View Source Code Vote up 6 votes
/**
 * Verifies that the AM accepts a "new version" client-to-AM token: the
 * original token's identifier is wrapped in the test identifier, re-signed
 * with the AM's secret manager, and used to ping the AM over RPC.
 */
private void verifyNewVersionToken(final Configuration conf, final CustomAM am,
    Token<ClientToAMTokenIdentifier> token, MockRM rm) throws IOException,
    InterruptedException {
  final UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me");

  // Re-sign the wrapped identifier and preserve the original service address.
  final Token<ClientToAMTokenIdentifier> newToken =
      new Token<ClientToAMTokenIdentifier>(
          new ClientToAMTokenIdentifierForTest(token.decodeIdentifier(), "message"),
          am.getClientToAMTokenSecretManager());
  newToken.setService(token.getService());
  ugi.addToken(newToken);

  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      CustomProtocol proxy =
          (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L, am.address,
              conf);
      proxy.ping();
      Assert.assertTrue(am.pinged);
      return null;
    }
  });
}
 
Example 4
Project: hadoop   File: KMS.java   View Source Code Vote up 6 votes
/**
 * REST endpoint returning metadata for the requested keys as JSON.
 * Access is checked against the GET_METADATA ACL, the provider call runs as
 * the authenticated caller, and the call is metered and audited.
 */
@GET
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
    List<String> keyNamesList) throws Exception {
  KMSWebApp.getAdminCallsMeter().mark();
  UserGroupInformation user = HttpUserGroupInformation.get();
  final String[] keyNames =
      keyNamesList.toArray(new String[keyNamesList.size()]);
  assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);

  // Fetch metadata under the caller's identity so provider-level checks
  // apply to the actual user.
  final KeyProvider.Metadata[] keysMeta = user.doAs(
      new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
        @Override
        public KeyProvider.Metadata[] run() throws Exception {
          return provider.getKeysMetadata(keyNames);
        }
      });

  Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
  kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
  return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
 
Example 5
Project: hadoop   File: TestSecureLogins.java   View Source Code Vote up 6 votes
/**
 * Logs in the ZooKeeper principal from its keytab and verifies that a SASL
 * ACL built under that UGI carries the expected realm and scheme.
 */
@Test
public void testUGILogin() throws Throwable {

  UserGroupInformation ugi = loginUGI(ZOOKEEPER, keytab_zk);
  RegistrySecurity.UgiInfo ugiInfo =
      new RegistrySecurity.UgiInfo(ugi);
  LOG.info("logged in as: {}", ugiInfo);
  assertTrue("security is not enabled: " + ugiInfo,
      UserGroupInformation.isSecurityEnabled());
  // Fixed: assertion messages are shown on FAILURE, so the message must
  // describe the failing condition ("is not keytab based"), not success.
  assertTrue("login is not keytab based: " + ugiInfo,
      ugi.isFromKeytab());

  // now we are here, build a SASL ACL
  ACL acl = ugi.doAs(new PrivilegedExceptionAction<ACL>() {
    @Override
    public ACL run() throws Exception {
      return registrySecurity.createSaslACLFromCurrentUser(0);
    }
  });
  assertEquals(ZOOKEEPER_REALM, acl.getId().getId());
  assertEquals(ZookeeperConfigOptions.SCHEME_SASL, acl.getId().getScheme());
  registrySecurity.addSystemACL(acl);

}
 
Example 6
Project: hadoop   File: AMLauncher.java   View Source Code Vote up 5 votes
/**
 * Builds a ContainerManagementProtocol proxy for the NodeManager hosting the
 * AM's master container, authenticated with a freshly minted NM token.
 *
 * @param containerId the AM container whose NodeManager is targeted
 * @return an RPC proxy bound to that NodeManager's address
 */
protected ContainerManagementProtocol getContainerMgrProxy(
    final ContainerId containerId) {

  final NodeId node = masterContainer.getNodeId();
  final InetSocketAddress containerManagerBindAddress =
      NetUtils.createSocketAddrForHost(node.getHost(), node.getPort());

  final YarnRPC rpc = YarnRPC.create(conf); // TODO: Don't create again and again.

  // The UGI's user name is the application attempt id (not the submitting
  // user) — the proxy identity is keyed per attempt.
  UserGroupInformation currentUser =
      UserGroupInformation.createRemoteUser(containerId
          .getApplicationAttemptId().toString());

  // Resolve the real submitting user so the NM token is minted for the
  // correct owner.
  String user =
      rmContext.getRMApps()
          .get(containerId.getApplicationAttemptId().getApplicationId())
          .getUser();
  org.apache.hadoop.yarn.api.records.Token token =
      rmContext.getNMTokenSecretManager().createNMToken(
          containerId.getApplicationAttemptId(), node, user);
  // The NM token must be added to the UGI before doAs — presumably the RPC
  // layer selects it by service address to authenticate to the NM (standard
  // YARN NM-token flow; confirm against the RPC client).
  currentUser.addToken(ConverterUtils.convertFromYarn(token,
      containerManagerBindAddress));

  return currentUser
      .doAs(new PrivilegedAction<ContainerManagementProtocol>() {

        @Override
        public ContainerManagementProtocol run() {
          return (ContainerManagementProtocol) rpc.getProxy(
              ContainerManagementProtocol.class,
              containerManagerBindAddress, conf);
        }
      });
}
 
Example 7
Project: angel   File: AngelRunJar.java   View Source Code Vote up 5 votes
/**
 * Submits an Angel application: picks the configured submitter class
 * (forcing the Python runner when a pyangel file is present), instantiates
 * it, and runs its submit() under the current user's UGI.
 *
 * @param conf job configuration; may be mutated to select the Python runner
 * @throws Exception if the submitter cannot be loaded or submission fails
 */
public static void submit(Configuration conf) throws Exception {
  LOG.info("angel python file: " + conf.get("angel.pyangel.pyfile"));
  if (null != conf.get("angel.pyangel.pyfile")) {
    conf.set(AngelConf.ANGEL_APP_SUBMIT_CLASS, "com.tencent.angel.api.python.PythonRunner");
  }
  // Instantiate the submitter class named in the configuration.
  final String submitClassName =
    conf.get(AngelConf.ANGEL_APP_SUBMIT_CLASS, AngelConf.DEFAULT_ANGEL_APP_SUBMIT_CLASS);
  UserGroupInformation ugi = UGITools.getCurrentUser(conf);
  ugi.doAs(new PrivilegedExceptionAction<String>() {
    @Override public String run() throws Exception {
      AppSubmitter submitter = null;
      try {
        Class<?> submitClass = Class.forName(submitClassName);
        submitter = (AppSubmitter) submitClass.newInstance();
        LOG.info("submitClass: " + submitClass.getName());
      } catch (Exception x) {
        String message = "load submit class failed " + x.getMessage();
        LOG.fatal(message, x);
        // Fixed: preserve the original exception as the cause instead of
        // discarding it and keeping only its message.
        InvalidParameterException failure = new InvalidParameterException(message);
        failure.initCause(x);
        throw failure;
      }

      submitter.submit(conf);
      return "OK";
    }
  });
}
 
Example 8
Project: hadoop   File: TestDelegationTokensWithHA.java   View Source Code Vote up 5 votes
/**
 * Verifies that an HDFS delegation token can be renewed under doAs() by both
 * the long (principal-style) and short form of the renewer's user name, and
 * then cancelled by the long form.
 */
@Test(timeout = 300000)
public void testDelegationTokenWithDoAs() throws Exception {
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(fs, "JobTracker");
  // NOTE(review): the long user name below looks redacted by the site this
  // sample was scraped from — confirm the intended principal form.
  final UserGroupInformation longUgi = UserGroupInformation
      .createRemoteUser("JobTracker/[email protected]");
  final UserGroupInformation shortUgi = UserGroupInformation
      .createRemoteUser("JobTracker");
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // try renew with long name
      token.renew(conf);
      return null;
    }
  });
  shortUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      token.renew(conf);
      return null;
    }
  });
  longUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      // Fixed: removed stray empty statement ("token.cancel(conf);;").
      token.cancel(conf);
      return null;
    }
  });
}
 
Example 9
Project: scheduling-connector-for-hadoop   File: HPCLogAggregateHandler.java   View Source Code Vote up 5 votes
/**
 * Initializes and starts the log aggregation handler while impersonating the
 * application's user.
 *
 * @param conf configuration passed to the handler's init()
 * @param appUser user under whose identity the handler lifecycle runs
 * @param aggregateHandler handler to initialize and start
 * @throws IOException on doAs failure
 * @throws InterruptedException if the doAs action is interrupted
 */
private static void initAndStartAggregation(final Configuration conf,
    String appUser, final HPCLogAggregateHandler aggregateHandler)
    throws IOException, InterruptedException {
  final UserGroupInformation aggregatorUgi =
      UserGroupInformation.createRemoteUser(appUser);
  aggregatorUgi.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      // Both lifecycle calls must run as the application user.
      aggregateHandler.init(conf);
      aggregateHandler.start();
      return null;
    }
  });
}
 
Example 10
Project: QDrill   File: HiveScan.java   View Source Code Vote up 5 votes
/**
 * Computes the Hive input splits while impersonating the query user, so
 * metastore/filesystem access control applies to that user rather than to
 * the Drill process user.
 *
 * @throws ExecutionSetupException wrapping any interruption or I/O failure
 */
private void getSplitsWithUGI() throws ExecutionSetupException {
  final UserGroupInformation proxyUgi =
      ImpersonationUtil.createProxyUgi(getUserName());
  try {
    proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
      public Void run() throws Exception {
        getSplits();
        return null;
      }
    });
  } catch (final InterruptedException | IOException e) {
    final String errMsg = String.format("Failed to create input splits: %s", e.getMessage());
    logger.error(errMsg, e);
    throw new DrillRuntimeException(errMsg, e);
  }
}
 
Example 11
Project: dremio-oss   File: FileSystemDatasetAccessor.java   View Source Code Vote up 5 votes
/**
 * Builds a DatasetConfig for the file(s) at the given selection: samples the
 * batch schema as the process user, merges it with any previously stored
 * schema, and classifies the dataset as a source folder or source file.
 *
 * @param fs filesystem wrapper used for sampling and type detection
 * @param selection files/directories whose schema is sampled
 * @param tableSchemaPath full path components; last element becomes the name
 * @return the assembled dataset configuration
 * @throws RuntimeException wrapping any failure during sampling or assembly
 */
protected DatasetConfig getDatasetInternal(final FileSystemWrapper fs, final FileSelection selection, List<String> tableSchemaPath) {
  final UserGroupInformation processUGI = ImpersonationUtil.getProcessUserUGI();
  try {
    // Sample the schema as the process user, not the end user.
    BatchSchema newSchema = processUGI.doAs(
      new PrivilegedExceptionAction<BatchSchema>() {
        @Override
        public BatchSchema run() throws Exception {
          final Stopwatch watch = Stopwatch.createStarted();
          try {
            return getBatchSchema(selection, fs);
          } finally {
            logger.debug("Took {} ms to sample the schema of table located at: {}",
              watch.elapsed(TimeUnit.MILLISECONDS), selection.getSelectionRoot());
          }
        }
      }
    );
    // Directory selections become FOLDER datasets, single files FILE datasets.
    DatasetType type = fs.isDirectory(new Path(selection.getSelectionRoot())) ?
      DatasetType.PHYSICAL_DATASET_SOURCE_FOLDER :
      DatasetType.PHYSICAL_DATASET_SOURCE_FILE;
    // Merge sampled schema into the existing one, if one already exists
    BatchSchema schema = newSchema;
    if (oldConfig != null && DatasetHelper.getSchemaBytes(oldConfig) != null) {
      schema = BatchSchema.fromDataset(oldConfig).merge(newSchema);
    }
    return new DatasetConfig()
      .setName(tableSchemaPath.get(tableSchemaPath.size() - 1))
      .setType(type)
      .setFullPathList(tableSchemaPath)
      .setSchemaVersion(DatasetHelper.CURRENT_VERSION)
      .setRecordSchema(schema.toByteString())
      .setPhysicalDataset(new PhysicalDataset()
        .setFormatSettings(toFileFormat(formatPlugin).asFileConfig().setLocation(selection.getSelectionRoot())));

  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example 12
Project: hadoop   File: TestFileSystem.java   View Source Code Vote up 5 votes
/**
 * Verifies that the FileSystem cache keys on the UGI: distinct remote users
 * must receive distinct FileSystem instances. Then exercises the cache
 * against the default namenode port (skipped if the port is busy) and an
 * ephemeral port.
 */
public void testFsCache() throws Exception {
  {
    // Fixed: removed unused local "long now = System.currentTimeMillis()".
    String[] users = new String[]{"foo","bar"};
    final Configuration conf = new Configuration();
    FileSystem[] fs = new FileSystem[users.length];

    for(int i = 0; i < users.length; i++) {
      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(users[i]);
      fs[i] = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
          return FileSystem.get(conf);
      }});
      // Different users must never share a cached instance (reference check).
      for(int j = 0; j < i; j++) {
        assertFalse(fs[j] == fs[i]);
      }
    }
    FileSystem.closeAll();
  }

  {
    try {
      runTestCache(NameNode.DEFAULT_PORT);
    } catch(java.net.BindException be) {
      // The default port may already be taken on the test host; warn only.
      LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
          + NameNode.DEFAULT_PORT + ")", be);
    }

    runTestCache(0);
  }
}
 
Example 13
Project: hadoop   File: TestDelegationToken.java   View Source Code Vote up 5 votes
@SuppressWarnings("unchecked")
private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
    throws Exception {
  Assert.assertNotNull(token);
  // should be able to use token.decodeIdentifier() but webhdfs isn't
  // registered with the service loader for token decoding
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenId));
  try {
    identifier.readFields(in);
  } finally {
    in.close();
  }
  Assert.assertNotNull(identifier);
  LOG.info("A valid token should have non-null password, and should be renewed successfully");
  Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
  ugi.doAs(
      new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          token.renew(config);
          token.cancel(config);
          return null;
        }
      });
}
 
Example 14
Project: hadoop   File: NamenodeWebHdfsMethods.java   View Source Code Vote up 5 votes
/**
 * Handles an HTTP DELETE request for a WebHDFS path (file/directory delete or
 * snapshot delete, per the op parameter). Validates the parameters via
 * init(), then performs the operation under the caller's UGI so HDFS
 * permission checks apply to the remote user.
 *
 * @param ugi authenticated caller injected by the servlet context
 * @param delegation delegation token carried by the request, if any
 * @param username effective user name parameter
 * @param doAsUser proxy-user parameter for impersonated requests
 * @param path target filesystem path extracted from the URL
 * @param op delete operation to perform
 * @param recursive whether to delete directories recursively
 * @param snapshotName snapshot name, for snapshot-delete operations
 */
@DELETE
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(
    @Context final UserGroupInformation ugi,
    @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
        final DelegationParam delegation,
    @QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
        final UserParam username,
    @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
        final DoAsParam doAsUser,
    @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
    @QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
        final DeleteOpParam op,
    @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
        final RecursiveParam recursive,
    @QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
        final SnapshotNameParam snapshotName
    ) throws IOException, InterruptedException {

  init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);

  return ugi.doAs(new PrivilegedExceptionAction<Response>() {
    @Override
    public Response run() throws IOException {
      try {
        // Delegates to the internal overload with the absolute path.
        return delete(ugi, delegation, username, doAsUser,
            path.getAbsolutePath(), op, recursive, snapshotName);
      } finally {
        // reset() clears per-request state even if the delete throws.
        reset();
      }
    }
  });
}
 
Example 15
Project: hadoop   File: FileSystem.java   View Source Code Vote up 5 votes
/**
 * Creates a new (uncached) FileSystem instance for the URI's scheme and
 * authority on behalf of the named user. Resolves the best UGI for that user
 * (optionally from the configured Kerberos ticket cache) and performs the
 * instantiation under it. Internally invokes
 * {@link #newInstance(URI, Configuration)}.
 *
 * @param uri of the filesystem
 * @param conf the configuration to use
 * @param user to perform the get as
 * @return filesystem instance
 * @throws IOException if the filesystem cannot be created
 * @throws InterruptedException if the doAs action is interrupted
 */
public static FileSystem newInstance(final URI uri, final Configuration conf,
    final String user) throws IOException, InterruptedException {
  final String cachePath =
      conf.get(CommonConfigurationKeys.KERBEROS_TICKET_CACHE_PATH);
  final UserGroupInformation ugi =
      UserGroupInformation.getBestUGI(cachePath, user);
  return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws IOException {
      return newInstance(uri, conf);
    }
  });
}
 
Example 16
Project: hadoop   File: TestSnapshotDeletion.java   View Source Code Vote up 5 votes
/**
 * With dfs.permissions.enabled=false, a user other than the directory owner
 * must be able to delete a snapshot.
 */
@Test
public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
  // Restart the mini-cluster with permission checking turned off.
  cluster.shutdown();
  Configuration permissionlessConf = new Configuration(conf);
  permissionlessConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  cluster = new MiniDFSCluster.Builder(permissionlessConf).numDataNodes(0).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();

  // Create a snapshottable directory with one snapshot.
  final Path path = new Path("/dir");
  hdfs.mkdirs(path);
  hdfs.allowSnapshot(path);
  hdfs.mkdirs(new Path(path, "/test"));
  hdfs.createSnapshot(path, "s1");

  UserGroupInformation otherUser =
      UserGroupInformation.createRemoteUser("anotheruser");
  otherUser.doAs(new PrivilegedAction<Object>() {
    @Override
    public Object run() {
      DistributedFileSystem otherUserFs = null;
      try {
        otherUserFs = cluster.getFileSystem();
        otherUserFs.deleteSnapshot(path, "s1");
      } catch (IOException e) {
        fail("Failed to delete snapshot : " + e.getLocalizedMessage());
      } finally {
        IOUtils.closeStream(otherUserFs);
      }
      return null;
    }
  });
}
 
Example 17
Project: hadoop   File: RMWebServices.java   View Source Code Vote up 4 votes
/**
 * Kills the given RM application on behalf of the calling user and builds the
 * appropriate REST response: FORBIDDEN on an access-control failure, ACCEPTED
 * (with a Location header) while the kill is still in progress, OK once the
 * kill has completed.
 *
 * @param app application to kill; must not be null
 * @param callerUGI identity under which the kill RPC is issued
 * @param hsr originating HTTP request, used for the retry Location header
 * @return REST response reflecting the kill outcome
 * @throws IllegalArgumentException if {@code app} is null
 */
protected Response killApp(RMApp app, UserGroupInformation callerUGI,
    HttpServletRequest hsr) throws IOException, InterruptedException {

  if (app == null) {
    throw new IllegalArgumentException("app cannot be null");
  }
  String userName = callerUGI.getUserName();
  final ApplicationId appid = app.getApplicationId();
  KillApplicationResponse resp = null;
  try {
    resp =
        callerUGI
          .doAs(new PrivilegedExceptionAction<KillApplicationResponse>() {
            @Override
            public KillApplicationResponse run() throws IOException,
                YarnException {
              KillApplicationRequest req =
                  KillApplicationRequest.newInstance(appid);
              return rm.getClientRMService().forceKillApplication(req);
            }
          });
  } catch (UndeclaredThrowableException ue) {
    // doAs wraps checked exceptions not declared by run() in
    // UndeclaredThrowableException; unwrap two levels to find an ACL denial.
    // if the root cause is a permissions issue
    // bubble that up to the user
    if (ue.getCause() instanceof YarnException) {
      YarnException ye = (YarnException) ue.getCause();
      if (ye.getCause() instanceof AccessControlException) {
        String appId = app.getApplicationId().toString();
        String msg =
            "Unauthorized attempt to kill appid " + appId
                + " by remote user " + userName;
        return Response.status(Status.FORBIDDEN).entity(msg).build();
      } else {
        throw ue;
      }
    } else {
      throw ue;
    }
  }

  AppState ret = new AppState();
  ret.setState(app.getState().toString());

  // Kill is asynchronous: ACCEPTED tells the client to poll the same URL.
  if (resp.getIsKillCompleted()) {
    RMAuditLogger.logSuccess(userName, AuditConstants.KILL_APP_REQUEST,
      "RMWebService", app.getApplicationId());
  } else {
    return Response.status(Status.ACCEPTED).entity(ret)
      .header(HttpHeaders.LOCATION, hsr.getRequestURL()).build();
  }
  return Response.status(Status.OK).entity(ret).build();
}
 
Example 18
Project: hadoop   File: KMS.java   View Source Code Vote up 4 votes
@SuppressWarnings("rawtypes")
@POST
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
    KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response decryptEncryptedKey(
    @PathParam("versionName") final String versionName,
    @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
    Map jsonPayload)
    throws Exception {
  UserGroupInformation user = HttpUserGroupInformation.get();
  KMSClientProvider.checkNotEmpty(versionName, "versionName");
  KMSClientProvider.checkNotNull(eekOp, "eekOp");

  final String keyName = (String) jsonPayload.get(
      KMSRESTConstants.NAME_FIELD);
  String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
  String encMaterialStr = 
      (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
  Object retJSON;
  if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
    assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
    KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
    final byte[] iv = Base64.decodeBase64(ivStr);
    KMSClientProvider.checkNotNull(encMaterialStr,
        KMSRESTConstants.MATERIAL_FIELD);
    final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);

    KeyProvider.KeyVersion retKeyVersion = user.doAs(
        new PrivilegedExceptionAction<KeyVersion>() {
          @Override
          public KeyVersion run() throws Exception {
            return provider.decryptEncryptedKey(
                new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
                    versionName, iv, KeyProviderCryptoExtension.EEK,
                    encMaterial)
            );
          }
        }
    );

    retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
    kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
  } else {
    throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
        " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
        KMSRESTConstants.EEK_DECRYPT);
  }
  KMSWebApp.getDecryptEEKCallsMeter().mark();
  return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
      .build();
}
 
Example 19
Project: hadoop   File: TestCopyFiles.java   View Source Code Vote up 4 votes
/**
 * Exercises HFTP access control during distcp: a cluster started as one user
 * (DFS_UGI) is accessed by a second user (USER_UGI), and a copy from a
 * source directory with permission 000 must fail with distcp exit code -3.
 */
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    // Filesystem handle as the cluster owner, used to create the home dir.
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = 
      createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs = 
      USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, userConf);
      }
    });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    // DistCpV1 instance is also created under the unprivileged user.
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    // Source is read over HFTP, destination written over HDFS.
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          // -3 is distcp's failure exit code for this denial —
          // NOTE(review): confirm against DistCpV1's documented exit codes.
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 20
Project: hadoop   File: RMWebServices.java   View Source Code Vote up 4 votes
/**
 * Renews an RM delegation token on behalf of the calling user and returns
 * the new expiration time. Maps an InvalidToken cause to BadRequest and an
 * AccessControlException cause to FORBIDDEN; anything else is logged and
 * rethrown.
 *
 * @param tokenData token to renew, as received in the REST payload
 * @param hsr originating HTTP request (unused here beyond the signature)
 * @param callerUGI identity under which the renew RPC is issued
 * @return OK response whose body carries the next expiration time
 */
private Response renewDelegationToken(DelegationToken tokenData,
    HttpServletRequest hsr, UserGroupInformation callerUGI)
    throws AuthorizationException, IOException, InterruptedException,
    Exception {

  Token<RMDelegationTokenIdentifier> token =
      extractToken(tokenData.getToken());

  // Re-wrap the security token as a YARN records Token for the RPC request.
  org.apache.hadoop.yarn.api.records.Token dToken =
      BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
        .toString(), token.getPassword(), token.getService().toString());
  final RenewDelegationTokenRequest req =
      RenewDelegationTokenRequest.newInstance(dToken);

  RenewDelegationTokenResponse resp;
  try {
    resp =
        callerUGI
          .doAs(new PrivilegedExceptionAction<RenewDelegationTokenResponse>() {
            @Override
            public RenewDelegationTokenResponse run() throws IOException,
                YarnException {
              return rm.getClientRMService().renewDelegationToken(req);
            }
          });
  } catch (UndeclaredThrowableException ue) {
    // doAs wraps undeclared checked exceptions; unwrap two levels to
    // classify the real failure.
    if (ue.getCause() instanceof YarnException) {
      if (ue.getCause().getCause() instanceof InvalidToken) {
        throw new BadRequestException(ue.getCause().getCause().getMessage());
      } else if (ue.getCause().getCause() instanceof org.apache.hadoop.security.AccessControlException) {
        return Response.status(Status.FORBIDDEN)
          .entity(ue.getCause().getCause().getMessage()).build();
      }
      LOG.info("Renew delegation token request failed", ue);
      throw ue;
    }
    LOG.info("Renew delegation token request failed", ue);
    throw ue;
  } catch (Exception e) {
    LOG.info("Renew delegation token request failed", e);
    throw e;
  }
  long renewTime = resp.getNextExpirationTime();

  DelegationToken respToken = new DelegationToken();
  respToken.setNextExpirationTime(renewTime);
  return Response.status(Status.OK).entity(respToken).build();
}