Java Code Examples for org.apache.flume.Context#getInteger()

The following examples show how to use org.apache.flume.Context#getInteger(). Each example is taken from an open source project; the source file and license are noted above each snippet.
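Before the project examples, here is a minimal, self-contained sketch (the ExampleComponent class and property names are hypothetical, chosen only for illustration) of the two common ways getInteger() is used in configure(): the two-argument form falls back to the supplied default and never returns null, while the single-argument form returns null when the property is absent and must be null-checked before unboxing.
import org.apache.flume.Context;
import org.apache.flume.conf.Configurable;

// Hypothetical component, used only to illustrate Context#getInteger().
public class ExampleComponent implements Configurable {

  private int batchSize;        // always set, thanks to the default value
  private Integer maxRetries;   // may remain null if the property is absent

  @Override
  public void configure(Context context) {
    // Two-argument form: falls back to 100 when "batchSize" is not configured.
    batchSize = context.getInteger("batchSize", 100);

    // One-argument form: returns null when "maxRetries" is not configured,
    // so callers must null-check before unboxing or comparing.
    maxRetries = context.getInteger("maxRetries");
    if (maxRetries != null && maxRetries < 0) {
      throw new IllegalArgumentException("maxRetries must be >= 0");
    }
  }
}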
Example 1
Source File: AbstractHDFSWriter.java    From Transwarp-Sample-Code with MIT License
@Override
public void configure(Context context) {
  configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
  if (configuredMinReplicas != null) {
    Preconditions.checkArgument(configuredMinReplicas >= 0,
        "hdfs.minBlockReplicas must be greater than or equal to 0");
  }
  numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;

  if (numberOfCloseRetries > 1) {
    try {
      timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000l);
    } catch (NumberFormatException e) {
      logger.warn("hdfs.callTimeout can not be parsed to a long: " + context.getLong("hdfs.callTimeout"));
    }
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries/numberOfCloseRetries, 1000);
  }

}
 
Example 2
Source File: PseudoTxnMemoryChannel.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  Integer capacity = context.getInteger("capacity");
  keepAlive = context.getInteger("keep-alive");

  if (capacity == null) {
    capacity = defaultCapacity;
  }

  if (keepAlive == null) {
    keepAlive = defaultKeepAlive;
  }

  queue = new ArrayBlockingQueue<Event>(capacity);
  if(channelCounter == null) {
    channelCounter = new ChannelCounter(getName());
  }
}
 
Example 3
Source File: JettyWebSocketSource.java    From sequenceiq-samples with Apache License 2.0
@Override
protected void doConfigure(Context context) throws FlumeException {
    ensureRequiredNonNull(context, HOST_KEY, PORT_KEY, PATH_KEY);

    this.host = context.getString(HOST_KEY);
    this.port = context.getInteger(PORT_KEY);
    this.path = context.getString(PATH_KEY);
    this.enableSsl = context.getBoolean(SSL_KEY, false);
    this.keystore = context.getString(KEYSTORE_KEY);
    this.keystorePassword = context.getString(KEYSTORE_PASSWORD_KEY);

    if (enableSsl) {
        checkNotNull(keystore, KEYSTORE_KEY + " must be specified when SSL is enabled");
        checkNotNull(keystorePassword, KEYSTORE_PASSWORD_KEY + " must be specified when SSL is enabled");
    }
}
 
Example 4
Source File: PeriodicEmissionSource.java    From flume-interceptor-analytics with Apache License 2.0
/** {@inheritDoc} */
@Override
public void configure(Context context) {
  this.emitFreqMS = context.getInteger(EMIT_FREQ_MS);
  try {
    this.interceptorClass = Class.forName(context.getString(INTERCEPTOR_CLASS));
  } catch (ClassNotFoundException e) {
    throw new IllegalArgumentException(e);
  }
  if (!AnalyticInterceptor.class.isAssignableFrom(interceptorClass)) {
    throw new IllegalArgumentException(
        "interceptorClass must implement the AnalyticInterceptor interface");
  }
  LOG.info(String.format(
    "Initializing PeriodicEmissionSource: emitFreqMS=%d, interceptorClass=%s", emitFreqMS,
    interceptorClass));
}
 
Example 5
Source File: HDFSStorage.java    From attic-apex-malhar with Apache License 2.0
/**
 * Reads the storage configuration (id, base directory, restore flag, block size,
 * block size multiple and retry count) from the given context.
 *
 * @param ctx context carrying the configuration properties
 */
@Override
public void configure(Context ctx)
{
  String tempId = ctx.getString(ID);
  if (tempId == null) {
    if (id == null) {
      throw new IllegalArgumentException("id can't be null.");
    }
  } else {
    id = tempId;
  }

  String tempBaseDir = ctx.getString(BASE_DIR_KEY);
  if (tempBaseDir != null) {
    baseDir = tempBaseDir;
  }

  restore = ctx.getBoolean(RESTORE_KEY, restore);
  Long tempBlockSize = ctx.getLong(BLOCKSIZE);
  if (tempBlockSize != null) {
    blockSize = tempBlockSize;
  }
  blockSizeMultiple = ctx.getInteger(BLOCK_SIZE_MULTIPLE, blockSizeMultiple);
  retryCount = ctx.getInteger(NUMBER_RETRY,retryCount);
}
 
Example 6
Source File: BulkProcessorBulider.java    From flume-elasticsearch-sink with Apache License 2.0
public BulkProcessor buildBulkProcessor(Context context, ElasticSearchSink elasticSearchSink) {
    this.elasticSearchSink = elasticSearchSink;
    bulkActions = context.getInteger(ES_BULK_ACTIONS,
            DEFAULT_ES_BULK_ACTIONS);
    bulkProcessorName = context.getString(ES_BULK_PROCESSOR_NAME,
            DEFAULT_ES_BULK_PROCESSOR_NAME);
    bulkSize = Util.getByteSizeValue(context.getInteger(ES_BULK_SIZE),
            context.getString(ES_BULK_SIZE_UNIT));
    concurrentRequest = context.getInteger(ES_CONCURRENT_REQUEST,
            DEFAULT_ES_CONCURRENT_REQUEST);
    flushIntervalTime = Util.getTimeValue(context.getString(ES_FLUSH_INTERVAL_TIME),
            DEFAULT_ES_FLUSH_INTERVAL_TIME);
    backoffPolicyTimeInterval = context.getString(ES_BACKOFF_POLICY_TIME_INTERVAL,
            DEFAULT_ES_BACKOFF_POLICY_START_DELAY);
    backoffPolicyRetries = context.getInteger(ES_BACKOFF_POLICY_RETRIES,
            DEFAULT_ES_BACKOFF_POLICY_RETRIES);
    return build(elasticSearchSink.getClient());
}
 
Example 7
Source File: MongoSink.java    From ingestion with Apache License 2.0
/**
 * {@inheritDoc}
 *
 * @param context
 */
@Override
public void configure(Context context) {
    try {
        if (!"INJECTED".equals(context.getString(CONF_URI))) {
            this.mongoClientURI = new MongoClientURI(
                    context.getString(CONF_URI),
                    MongoClientOptions.builder().writeConcern(WriteConcern.SAFE)
            );
            this.mongoClient = new MongoClient(mongoClientURI);
            if (mongoClientURI.getDatabase() != null) {
                this.mongoDefaultDb = mongoClient.getDB(mongoClientURI.getDatabase());
            }
            if (mongoClientURI.getCollection() != null) {
                this.mongoDefaultCollection = mongoDefaultDb.getCollection(mongoClientURI.getCollection());
            }
        }
        final String mappingFilename = context.getString(CONF_MAPPING_FILE);
        this.eventParser = (mappingFilename == null) ?
                new EventParser()
                :
                new EventParser(MappingDefinition.load(mappingFilename));

        this.isDynamicMode = context.getBoolean(CONF_DYNAMIC, DEFAULT_DYNAMIC);
        if (!isDynamicMode && mongoDefaultCollection == null) {
            throw new MongoSinkException("Default MongoDB collection must be specified unless dynamic mode is enabled");
        }
        this.dynamicDBField = context.getString(CONF_DYNAMIC_DB_FIELD, DEFAULT_DYNAMIC_DB_FIELD);
        this.dynamicCollectionField = context.getString(CONF_DYNAMIC_COLLECTION_FIELD, DEFAULT_DYNAMIC_COLLECTION_FIELD);

        this.sinkCounter = new SinkCounter(this.getName());
        this.batchSize = context.getInteger(CONF_BATCH_SIZE, DEFAULT_BATCH_SIZE);
        
        this.updateInsteadReplace = context.getBoolean(CONF_UPDATE_INSTEAD_REPLACE,DEFAULT_UPDATE_INSTEAD_REPLACE);
        
    } catch (IOException ex) {
        throw new MongoSinkException(ex);
    }
}
 
Example 8
Source File: StressSource.java    From mt-flume with Apache License 2.0
/**
 * Read parameters from context
 * <li>-maxTotalEvents = type long that defines the total number of events to be sent
 * <li>-maxSuccessfulEvents = type long that defines the total number of events to be sent
 * <li>-size = type int that defines the number of bytes in each event
 * <li>-batchSize = type int that defines the number of events being sent in one batch
 */
@Override
public void configure(Context context) {
  /* Limit on the total number of events. */
  maxTotalEvents = context.getLong("maxTotalEvents", -1L);
  /* Limit on the total number of successful events. */
  maxSuccessfulEvents = context.getLong("maxSuccessfulEvents", -1L);
  /* Set max events in a batch submission */
  batchSize = context.getInteger("batchSize", 1);
  /* Size of events to be generated. */
  int size = context.getInteger("size", 500);

  prepEventData(size);
}
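As a quick illustration of where these values come from, the following sketch (not part of the original project; the class name and property values are made up) builds a Context by hand and passes it to StressSource#configure(). Context stores every property as a String, and getInteger()/getLong() parse the value on access, falling back to the supplied default when the key is missing.
import java.util.HashMap;
import java.util.Map;

import org.apache.flume.Context;
import org.apache.flume.source.StressSource;

// Illustrative only: hand-built configuration for the source shown above.
public class StressSourceConfigDemo {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<String, String>();
    params.put("maxTotalEvents", "1000");
    params.put("batchSize", "10");
    params.put("size", "256");

    StressSource source = new StressSource();
    // "maxSuccessfulEvents" is absent, so configure() falls back to -1L.
    source.configure(new Context(params));
  }
}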
 
Example 9
Source File: NullSink.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  batchSize = context.getInteger("batchSize", DFLT_BATCH_SIZE);
  logger.debug(this.getName() + " " +
      "batch size set to " + String.valueOf(batchSize));
  Preconditions.checkArgument(batchSize > 0, "Batch size must be > 0");

  logEveryNEvents = context.getInteger("logEveryNEvents", DFLT_LOG_EVERY_N_EVENTS);
  logger.debug(this.getName() + " " +
      "log event N events set to " + logEveryNEvents);
  Preconditions.checkArgument(logEveryNEvents > 0, "logEveryNEvents must be > 0");
}
 
Example 10
Source File: RabbitMQSource.java    From rabbitmq-flume-plugin with BSD 3-Clause "New" or "Revised" License
@Override
public void configure(Context context) {
    // Only the queue name does not have a default value
    Configurables.ensureRequiredNonNull(context, QUEUE_KEY);

    // Assign all of the configured values
    hostname = context.getString(HOST_KEY, ConnectionFactory.DEFAULT_HOST);
    port = context.getInteger(PORT_KEY, ConnectionFactory.DEFAULT_AMQP_PORT);
    enableSSL = context.getBoolean(SSL_KEY, false);
    virtualHost = context.getString(VHOST_KEY, ConnectionFactory.DEFAULT_VHOST);
    username = context.getString(USER_KEY, ConnectionFactory.DEFAULT_USER);
    password = context.getString(PASSWORD_KEY, ConnectionFactory.DEFAULT_PASS);
    queue = context.getString(QUEUE_KEY, null);
    exchange = context.getString(EXCHANGE_KEY, null);
    autoAck = context.getBoolean(AUTOACK_KEY, false);
    requeuing = context.getBoolean(REQUEUING, false);
    prefetchCount = context.getInteger(PREFETCH_COUNT_KEY, 0);
    timeout = context.getInteger(TIMEOUT_KEY, -1);
    consumerThreads = context.getInteger(THREAD_COUNT_KEY, 1);

    // Ensure that Flume can connect to RabbitMQ
    testRabbitMQConnection();

    // Create and configure the counters
    sourceCounter = new SourceCounter(getName());
    counterGroup = new CounterGroup();
    counterGroup.setName(getName());
}
 
Example 11
Source File: GangliaServer.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  this.pollFrequency = context.getInteger(this.CONF_POLL_FREQUENCY, 60);
  String localHosts = context.getString(this.CONF_HOSTS);
  if (localHosts == null || localHosts.isEmpty()) {
    throw new ConfigurationException("Hosts list cannot be empty.");
  }
  this.hosts = this.getHostsFromString(localHosts);
  this.isGanglia3 = context.getBoolean(this.CONF_ISGANGLIA3, false);
}
 
Example 12
Source File: PhoenixSink.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void configure(Context context){
    this.setName(NAME + counter.incrementAndGet());
    this.batchSize = context.getInteger(FlumeConstants.CONFIG_BATCHSIZE, FlumeConstants.DEFAULT_BATCH_SIZE);
    final String eventSerializerType = context.getString(FlumeConstants.CONFIG_SERIALIZER);
    
    Preconditions.checkNotNull(eventSerializerType,"Event serializer cannot be empty, please specify in the configuration file");
    initializeSerializer(context,eventSerializerType);
    this.sinkCounter = new SinkCounter(this.getName());
}
 
Example 13
Source File: MultiLineExecSource.java    From flume-plugins with MIT License
@Override
public void configure(Context context) {
    command = context.getString("command");
    eventTerminator = context.getString("event.terminator");
    lineTerminator = context.getString("line.terminator", DEFAULT_LINE_TERMINATOR);

    Preconditions.checkState(command != null, "The parameter command must be specified");
    Preconditions.checkState(lineTerminator != null, "The parameter line.terminator must be specified");

    restartThrottle = context.getLong(CONFIG_RESTART_THROTTLE, DEFAULT_RESTART_THROTTLE);
    restart = context.getBoolean(CONFIG_RESTART, DEFAULT_RESTART);
    logStderr = context.getBoolean(CONFIG_LOG_STDERR, DEFAULT_LOG_STDERR);
    bufferCount = context.getInteger(CONFIG_BATCH_SIZE, DEFAULT_BATCH_SIZE);
    charset = Charset.forName(context.getString(CHARSET, DEFAULT_CHARSET));
}
 
Example 14
Source File: BlobHandler.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  this.maxBlobLength = context.getInteger(MAX_BLOB_LENGTH_KEY, MAX_BLOB_LENGTH_DEFAULT);
  if (this.maxBlobLength <= 0) {
    throw new ConfigurationException("Configuration parameter " + MAX_BLOB_LENGTH_KEY
        + " must be greater than zero: " + maxBlobLength);
  }
}
 
Example 15
Source File: NetcatSource.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  String hostKey = NetcatSourceConfigurationConstants.CONFIG_HOSTNAME;
  String portKey = NetcatSourceConfigurationConstants.CONFIG_PORT;
  String ackEventKey = NetcatSourceConfigurationConstants.CONFIG_ACKEVENT;

  Configurables.ensureRequiredNonNull(context, hostKey, portKey);

  hostName = context.getString(hostKey);
  port = context.getInteger(portKey);
  ackEveryEvent = context.getBoolean(ackEventKey, true);
  maxLineLength = context.getInteger(
      NetcatSourceConfigurationConstants.CONFIG_MAX_LINE_LENGTH,
      NetcatSourceConfigurationConstants.DEFAULT_MAX_LINE_LENGTH);
}
 
Example 16
Source File: SyslogUDPSource.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  Configurables.ensureRequiredNonNull(
      context, SyslogSourceConfigurationConstants.CONFIG_PORT);
  port = context.getInteger(SyslogSourceConfigurationConstants.CONFIG_PORT);
  host = context.getString(SyslogSourceConfigurationConstants.CONFIG_HOST);
  formaterProp = context.getSubProperties(
      SyslogSourceConfigurationConstants.CONFIG_FORMAT_PREFIX);
}
 
Example 17
Source File: DruidSink.java    From ingestion with Apache License 2.0
@Override
public void configure(Context context) {

    indexService = context.getString(INDEX_SERVICE);
    discoveryPath = context.getString(DISCOVERY_PATH);
    dimensions = Arrays.asList(context.getString(DIMENSIONS).split(","));
    firehosePattern = context.getString(FIREHOSE_PATTERN, DEFAULT_FIREHOSE);
    dataSource = context.getString(DATA_SOURCE, DEFAUL_DATASOURCE);
    aggregators = AggregatorsHelper.build(context.getString(AGGREGATORS));
    queryGranularity = QueryGranularityHelper.getGranularity(context.getString(QUERY_GRANULARITY,
            DEFAULT_QUERY_GRANULARITY));
    segmentGranularity = Granularity.valueOf(context.getString(SEGMENT_GRANULARITY, DEFAULT_SEGMENT_GRANULARITY));
    period = context.getString(WINDOW_PERIOD, DEFAULT_PERIOD);
    partitions = context.getInteger(PARTITIONS, DEFAULT_PARTITIONS);
    replicants = context.getInteger(REPLICANTS, DEFAULT_REPLICANTS);
    // Tranquility needs to be able to extract timestamps from your object type (in this case, Map<String, Object>).
    timestampField = context.getString(TIMESTAMP_FIELD, DEFAULT_TIMESTAMP_FIELD);
    zookeeperLocation = context.getString(ZOOKEEPER_LOCATION, DEFAULT_ZOOKEEPER_LOCATION);
    baseSleppTime = context.getInteger(ZOOKEEPPER_BASE_SLEEP_TIME, DEFAULT_ZOOKEEPER_BASE_SLEEP);
    maxRetries = context.getInteger(ZOOKEEPER_MAX_RETRIES, DEFAULT_ZOOKEEPER_MAX_RETRIES);
    maxSleep = context.getInteger(ZOOKEEPER_MAX_SLEEP, DEFAULT_ZOOKEEPER_MAX_SLEEP);
    batchSize = context.getInteger(BATCH_SIZE, DEFAULT_BATCH_SIZE);

    druidService = buildDruidService();
    sinkCounter = new SinkCounter(this.getName());
    eventParser = new EventParser(timestampField);
}
 
Example 18
Source File: HTTPMetricsServer.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  port = context.getInteger(CONFIG_PORT, DEFAULT_PORT);
}
 
Example 19
Source File: HashtagTopNInterceptor.java    From flume-interceptor-analytics with Apache License 2.0
@Override
public void configure(Context context) {
  this.topN = context.getInteger(TOP_N);
}
 
Example 20
Source File: NGSIMySQLSink.java    From fiware-cygnus with GNU Affero General Public License v3.0
@Override
public void configure(Context context) {
    mysqlHost = context.getString("mysql_host", DEFAULT_HOST);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_host=" + mysqlHost + ")");
    mysqlPort = context.getString("mysql_port", DEFAULT_PORT);
    int intPort = Integer.parseInt(mysqlPort);
    
    if ((intPort <= 0) || (intPort > 65535)) {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (mysql_port=" + mysqlPort + ") "
                + "must be between 0 and 65535");
    } else {
        LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_port=" + mysqlPort + ")");
    }  // if else
    
    mysqlUsername = context.getString("mysql_username", DEFAULT_USER_NAME);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_username=" + mysqlUsername + ")");
    // FIXME: mysqlPassword should be read encrypted and decoded here
    mysqlPassword = context.getString("mysql_password", DEFAULT_PASSWORD);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_password=" + mysqlPassword + ")");
    
    maxPoolSize = context.getInteger("mysql_maxPoolSize", DEFAULT_MAX_POOL_SIZE);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_maxPoolSize=" + maxPoolSize + ")");
    
    rowAttrPersistence = context.getString("attr_persistence", DEFAULT_ROW_ATTR_PERSISTENCE).equals("row");
    String persistence = context.getString("attr_persistence", DEFAULT_ROW_ATTR_PERSISTENCE);
    
    if (persistence.equals("row") || persistence.equals("column")) {
        LOGGER.debug("[" + this.getName() + "] Reading configuration (attr_persistence="
            + persistence + ")");
    } else {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (attr_persistence="
            + persistence + ") must be 'row' or 'column'");
    }  // if else

    String attrNativeTypesStr = context.getString("attr_native_types", DEFAULT_ATTR_NATIVE_TYPES);
    if (attrNativeTypesStr.equals("true") || attrNativeTypesStr.equals("false")) {
        attrNativeTypes = Boolean.valueOf(attrNativeTypesStr);
        LOGGER.debug("[" + this.getName() + "] Reading configuration (attr_native_types=" + attrNativeTypesStr + ")");
    } else {
        invalidConfiguration = true;
        LOGGER.debug("[" + this.getName() + "] Invalid configuration (attr_native_types="
            + attrNativeTypesStr + ") -- Must be 'true' or 'false'");
    } // if else

    String attrMetadataStoreSrt = context.getString("attr_metadata_store", "true");

    if (attrMetadataStoreSrt.equals("true") || attrMetadataStoreSrt.equals("false")) {
        attrMetadataStore = Boolean.parseBoolean(attrMetadataStoreSrt);
        LOGGER.debug("[" + this.getName() + "] Reading configuration (attr_metadata_store="
                + attrMetadataStore + ")");
    } else {
        invalidConfiguration = true;
        LOGGER.debug("[" + this.getName() + "] Invalid configuration (attr_metadata_store="
                + attrNativeTypesStr + ") -- Must be 'true' or 'false'");
    } // if else

    mysqlOptions = context.getString("mysql_options", null);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mysql_options=" + mysqlOptions + ")");

    super.configure(context);
}