Java Code Examples for org.apache.hadoop.conf.Configuration#getInstances()

The following examples show how to use org.apache.hadoop.conf.Configuration#getInstances(). These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: datawave   File: DataTypeOverrideHelper.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Configures this helper from the Hadoop {@link Configuration}.
 *
 * Reads the event data type field name for this data type; when present, loads the
 * parallel key/value lists used to build {@code eventDataTypeMap}. Also instantiates
 * and initializes any configured {@code RecordFilter}s.
 *
 * @param config source of all per-data-type properties
 * @throws IllegalArgumentException if the key and value lists have different lengths
 */
public void setup(Configuration config) throws IllegalArgumentException {
    super.setup(config);
    
    eventDataTypeFieldName = config.get(this.getType().typeName() + Properties.EVENT_DATA_TYPE_FIELD_NAME);
    
    if (eventDataTypeFieldName != null) {
        // NOTE(review): split(",") on the "" default yields a single empty-string
        // element, so an unset pair maps "" -> "" — presumably harmless, but confirm.
        String[] eventDataTypeKeys = config.get(this.getType().typeName() + Properties.DATA_TYPE_KEYS, "").split(",");
        String[] eventDataTypeValues = config.get(this.getType().typeName() + Properties.DATA_TYPE_VALUES, "").split(",");
        
        // Keys and values are paired positionally, so the lists must be the same size.
        if (eventDataTypeKeys.length != eventDataTypeValues.length) {
            throw new IllegalArgumentException("Both " + this.getType().typeName() + Properties.DATA_TYPE_KEYS + " and " + this.getType().typeName()
                            + Properties.DATA_TYPE_VALUES + " must contain the same number of values.");
        }
        
        // Braced loop body (was un-braced); whitespace around each entry is ignored.
        for (int i = 0; i < eventDataTypeKeys.length; i++) {
            this.eventDataTypeMap.put(eventDataTypeKeys[i].trim(), eventDataTypeValues[i].trim());
        }
    }
    
    // Filters are optional; each configured filter gets a chance to initialize itself.
    if (config.get(Properties.FILTER_PROP) != null) {
        filters = config.getInstances(Properties.FILTER_PROP, RecordFilter.class);
        for (RecordFilter filter : filters) {
            filter.initialize(config);
        }
    }
}
 
Example 2
Source Project: hadoop   File: NameNode.java    License: Apache License 2.0 6 votes vote down vote up
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  // Order matters here: the namesystem must be up before the HTTP and RPC
  // servers that expose it, and plugins start only after RPC is serving.
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  // The pure-NAMENODE role starts its HTTP server earlier; other roles
  // (e.g. backup/checkpoint) start it here.
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      // Deliberately broad: a misbehaving plugin must not abort NameNode startup.
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
 
Example 3
Source Project: hadoop   File: TestIdentityProviders.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a custom IdentityProvider class named in the configuration is
 * instantiated by {@code Configuration#getInstances}.
 */
@Test
public void testPluggableIdentityProvider() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
    "org.apache.hadoop.ipc.UserIdentityProvider");

  List<IdentityProvider> providers = conf.getInstances(
    CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
    IdentityProvider.class);

  // assertEquals reports "expected:<1> but was:<N>" on failure, unlike
  // assertTrue on a boolean comparison, which reports nothing useful.
  assertEquals(1, providers.size());

  IdentityProvider ip = providers.get(0);
  assertNotNull(ip);
  // JUnit convention: expected value first, actual second.
  assertEquals(UserIdentityProvider.class, ip.getClass());
}
 
Example 4
Source Project: big-c   File: NameNode.java    License: Apache License 2.0 6 votes vote down vote up
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
  // Startup sequence is order-sensitive: namesystem first, then the servers
  // exposing it, and finally plugins once RPC is already serving.
  namesystem.startCommonServices(conf, haContext);
  registerNNSMXBean();
  // Roles other than pure NAMENODE start their HTTP server here.
  if (NamenodeRole.NAMENODE != role) {
    startHttpServer(conf);
    httpServer.setNameNodeAddress(getNameNodeAddress());
    httpServer.setFSImage(getFSImage());
  }
  rpcServer.start();
  plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
      ServicePlugin.class);
  for (ServicePlugin p: plugins) {
    try {
      p.start(this);
    } catch (Throwable t) {
      // Broad catch is intentional: plugin failures are logged, not fatal.
      LOG.warn("ServicePlugin " + p + " could not be started", t);
    }
  }
  LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
  if (rpcServer.getServiceRpcAddress() != null) {
    LOG.info(getRole() + " service RPC up at: "
        + rpcServer.getServiceRpcAddress());
  }
}
 
Example 5
Source Project: big-c   File: TestIdentityProviders.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a custom IdentityProvider class named in the configuration is
 * instantiated by {@code Configuration#getInstances}.
 */
@Test
public void testPluggableIdentityProvider() {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
    "org.apache.hadoop.ipc.UserIdentityProvider");

  List<IdentityProvider> providers = conf.getInstances(
    CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
    IdentityProvider.class);

  // assertEquals yields a descriptive failure message; assertTrue(size() == 1)
  // would only report "expected true".
  assertEquals(1, providers.size());

  IdentityProvider ip = providers.get(0);
  assertNotNull(ip);
  // JUnit convention: expected value first, actual second.
  assertEquals(UserIdentityProvider.class, ip.getClass());
}
 
Example 6
Source Project: hadoop   File: DataNode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Instantiates every ServicePlugin listed under DFS_DATANODE_PLUGINS_KEY and
 * starts each one, logging (but not propagating) any startup failure.
 */
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin plugin : plugins) {
    try {
      plugin.start(this);
      LOG.info("Started plug-in " + plugin);
    } catch (Throwable t) {
      // A broken plugin must not take the whole daemon down with it.
      LOG.warn("ServicePlugin " + plugin + " could not be started", t);
    }
  }
}
 
Example 7
Source Project: hadoop   File: DecayRpcScheduler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Resolves the IdentityProvider configured under the namespaced call-queue key,
 * falling back to a fresh UserIdentityProvider when none is configured.
 */
private IdentityProvider parseIdentityProvider(String ns, Configuration conf) {
  String providerKey =
    ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY;
  List<IdentityProvider> configured =
    conf.getInstances(providerKey, IdentityProvider.class);

  if (configured.isEmpty()) {
    LOG.info("IdentityProvider not specified, " +
      "defaulting to UserIdentityProvider");
    return new UserIdentityProvider();
  }

  // Extra entries beyond the first are silently ignored.
  return configured.get(0);
}
 
Example 8
Source Project: big-c   File: DataNode.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Loads the configured datanode service plugins and starts them one by one;
 * a plugin that throws is logged and skipped rather than aborting startup.
 */
private void startPlugins(Configuration conf) {
  plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
  for (ServicePlugin plugin : plugins) {
    try {
      plugin.start(this);
      LOG.info("Started plug-in " + plugin);
    } catch (Throwable t) {
      // Intentionally broad: plugin failure is non-fatal.
      LOG.warn("ServicePlugin " + plugin + " could not be started", t);
    }
  }
}
 
Example 9
Source Project: big-c   File: DecayRpcScheduler.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Looks up the IdentityProvider under "{ns}.{IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY}";
 * returns a default UserIdentityProvider when nothing is configured.
 */
private IdentityProvider parseIdentityProvider(String ns, Configuration conf) {
  String providerKey =
    ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY;
  List<IdentityProvider> configured =
    conf.getInstances(providerKey, IdentityProvider.class);

  if (configured.isEmpty()) {
    LOG.info("IdentityProvider not specified, " +
      "defaulting to UserIdentityProvider");
    return new UserIdentityProvider();
  }

  // Only the first configured provider is honored.
  return configured.get(0);
}
 
Example 10
/**
 * Lazily resolves the new-API output formats from configuration; the result is
 * cached in {@code newApiFormat} so the lookup happens at most once.
 * NOTE(review): not thread-safe — assumes single-threaded access; confirm with callers.
 */
private List<OutputFormat> getNewApiFormats(Configuration cfg) {
    if (newApiFormat != null) {
        return newApiFormat;
    }
    newApiFormat = cfg.getInstances(CFG_FIELD, OutputFormat.class);
    return newApiFormat;
}
 
Example 11
/**
 * Lazily resolves the old (mapred) API output formats from configuration,
 * caching the result in {@code oldApiFormat} after the first call.
 * NOTE(review): not thread-safe — assumes single-threaded access; confirm with callers.
 */
private List<org.apache.hadoop.mapred.OutputFormat> getOldApiFormats(Configuration cfg) {
    if (oldApiFormat != null) {
        return oldApiFormat;
    }
    oldApiFormat = cfg.getInstances(CFG_FIELD, org.apache.hadoop.mapred.OutputFormat.class);
    return oldApiFormat;
}