Java Code Examples for org.apache.flume.Context#getLong()

The following examples show how to use org.apache.flume.Context#getLong(). Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
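Before the project examples, here is a minimal, self-contained sketch of the two getLong() variants: the two-argument form falls back to a supplied default when the key is absent, while the one-argument form returns null. The property names used here (timeout, maxEvents, batchSize) are illustrative only and do not come from any of the projects below.

import org.apache.flume.Context;

public class GetLongSketch {
  public static void main(String[] args) {
    Context context = new Context();
    context.put("timeout", "30000");

    // Two-argument form: returns the parsed value, or the supplied default when the key is absent.
    long timeout = context.getLong("timeout", 10000L);    // 30000
    long maxEvents = context.getLong("maxEvents", -1L);   // -1, key not set

    // One-argument form: returns null when the key is absent, so check before unboxing.
    Long batchSize = context.getLong("batchSize");
    if (batchSize != null) {
      System.out.println("batchSize = " + batchSize);
    }

    System.out.println("timeout = " + timeout + ", maxEvents = " + maxEvents);
  }
}
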
Example 1
Source File: AbstractHDFSWriter.java    From Transwarp-Sample-Code with MIT License
@Override
public void configure(Context context) {
  configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas");
  if (configuredMinReplicas != null) {
    Preconditions.checkArgument(configuredMinReplicas >= 0,
        "hdfs.minBlockReplicas must be greater than or equal to 0");
  }
  numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1;

  if (numberOfCloseRetries > 1) {
    try {
      timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 10000L);
    } catch (NumberFormatException e) {
      logger.warn("hdfs.callTimeout cannot be parsed to a long: " + context.getString("hdfs.callTimeout"));
    }
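    // Spread the configured call timeout across the remaining close retries, waiting at least one second between attempts.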
    timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries/numberOfCloseRetries, 1000);
  }

}
 
Example 2
Source File: HDFSStorage.java    From attic-apex-malhar with Apache License 2.0
/**
 * This stores the Identifier information identified in the last store function call
 *
 * @param ctx
 */
@Override
public void configure(Context ctx)
{
  String tempId = ctx.getString(ID);
  if (tempId == null) {
    if (id == null) {
      throw new IllegalArgumentException("id can't be null.");
    }
  } else {
    id = tempId;
  }

  String tempBaseDir = ctx.getString(BASE_DIR_KEY);
  if (tempBaseDir != null) {
    baseDir = tempBaseDir;
  }

  restore = ctx.getBoolean(RESTORE_KEY, restore);
  Long tempBlockSize = ctx.getLong(BLOCKSIZE);
  if (tempBlockSize != null) {
    blockSize = tempBlockSize;
  }
  blockSizeMultiple = ctx.getInteger(BLOCK_SIZE_MULTIPLE, blockSizeMultiple);
  retryCount = ctx.getInteger(NUMBER_RETRY, retryCount);
}
 
Example 3
Source File: DirFileRecorder.java    From flume_monitor_source with Apache License 2.0
public void Configure(Context context) {
  LOG.info("Configure DirFileRecorder.");
  file_monitor_.Configure(context);
  String meta_dir = context.getString(FlumeConstants.META_STORE_DIR,
      "./meta/");
  send_interval_ = context.getLong(FlumeConstants.FILE_SEND_INTERVAL, 3L);
  String tmp_meta_store_file = meta_dir + FlumeConstants.DIR_SEP + 
                     file_monitor_.GetMonitorDir().hashCode();
  File tmp_file = new File(tmp_meta_store_file);
  meta_store_file_ = tmp_file.getAbsolutePath();
  
  auto_delete_line_delimiter_ = 
      context.getBoolean(FlumeConstants.AUTO_DELETE_LINE_DEILMITER, false);
  
  reader_writer_.Configure(meta_store_file_);
  
  file_parser_ = new MultiLineParser();
  file_parser_.Configure(context);
  
  executor_service_ = Executors.newScheduledThreadPool(1);
  sender_runnable_ = new SenderRunnable();
  file_info_map_ = new ConcurrentHashMap<Integer, FileInfo>();
}
 
Example 4
Source File: SimpleFileMonitor.java    From flume_monitor_source with Apache License 2.0
@Override
public void Configure(Context context) {
  monitor_dir_ = context.getString(FlumeConstants.MONITOR_DIR);
  Preconditions.checkState(monitor_dir_ != null,
      "you must specify 'monitor_dir' in the config file");
  check_interval_sec_ = context.getLong(FlumeConstants.FILE_CHECK_INTERVAL,
      default_interval_sec);
  file_name_file_name_include_pattern_str_ = context.getString(
      FlumeConstants.FILE_NAME_INCLUDE, default_file_name_include_str_);
  file_name_file_name_exclude_pattern_str_ = context.getString(
      FlumeConstants.FILE_NAME_EXCLUDE, default_file_name_exclude_str);

  shell_result_pattern_ = Pattern.compile(FlumeConstants.SHELL_RESULT_REGEX);
  file_name_include_pattern_ = Pattern
      .compile(file_name_file_name_include_pattern_str_);
  file_name_exclude_pattern_ = Pattern
      .compile(file_name_file_name_exclude_pattern_str_);

  integer_pattern_ = Pattern.compile(FlumeConstants.INTEGER_REGEX);

  shell_command_ = FlumeConstants.GetShellCommand(monitor_dir_);
  shell_executor_ = new Shell.ShellCommandExecutor(shell_command_);
  executor_service_ = Executors.newScheduledThreadPool(1);
}
 
Example 5
Source File: WatchDir.java    From flume-taildirectory-source with Apache License 2.0
private void loadConfiguration(Context context) {
	
	timeToUnlockFile = context.getLong(UNLOCK_TIME, 1L);
	fileHeader = context.getBoolean(FILE_HEADER, false);
	fileHeaderKey = context.getString(FILE_HEADER_KEY, "file");
	basenameHeader = context.getBoolean(BASENAME_HEADER, false);
	basenameHeaderKey = context.getString(BASENAME_HEADER_KEY, "basename");
	followLinks = context.getBoolean(FOLLOW_LINKS, false);
}
 
Example 6
Source File: NGSIMongoSink.java    From fiware-cygnus with GNU Affero General Public License v3.0
@Override
public void configure(Context context) {
    collectionsSize = context.getLong("collections_size", 0L);
    
    if ((collectionsSize > 0) && (collectionsSize < 4096)) {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (collections_size="
                + collectionsSize + ") -- Must be greater than or equal to 4096");
    } else {
        LOGGER.debug("[" + this.getName() + "] Reading configuration (collections_size=" + collectionsSize + ")");
    }  // if else
   
    maxDocuments = context.getLong("max_documents", 0L);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (max_documents=" + maxDocuments + ")");
    
    String attrPersistenceStr = context.getString("attr_persistence", "row");
    
    if (attrPersistenceStr.equals("row") || attrPersistenceStr.equals("column")) {
        rowAttrPersistence = attrPersistenceStr.equals("row");
        LOGGER.debug("[" + this.getName() + "] Reading configuration (attr_persistence="
            + attrPersistenceStr + ")");
    } else {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (attr_persistence="
            + attrPersistenceStr + ") must be 'row' or 'column'");
    }  // if else
    
    attrMetadataStore = context.getString("attr_metadata_store", "false");

    if (attrMetadataStore.equals("true") || attrMetadataStore.equals("false")) {
        LOGGER.debug("[" + this.getName() + "] Reading configuration (attr_metadata_store="
                + attrMetadataStore + ")");
    } else {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (attr_metadata_store="
                + attrMetadataStore + ") must be 'true' or 'false'");
    } // if else 

    super.configure(context);
}
 
Example 7
Source File: AbstractSinkSelector.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
  Long timeOut = context.getLong("maxTimeOut");
  if(timeOut != null){
    maxTimeOut = timeOut;
  }
}
 
Example 8
Source File: StressSource.java    From mt-flume with Apache License 2.0
/**
 * Read parameters from context
 * <li>-maxTotalEvents = type long that defines the total number of events to be sent
 * <li>-maxSuccessfulEvents = type long that defines the total number of successful events allowed
 * <li>-size = type int that defines the number of bytes in each event
 * <li>-batchSize = type int that defines the number of events being sent in one batch
 */
@Override
public void configure(Context context) {
  /* Limit on the total number of events. */
  maxTotalEvents = context.getLong("maxTotalEvents", -1L);
  /* Limit on the total number of successful events. */
  maxSuccessfulEvents = context.getLong("maxSuccessfulEvents", -1L);
  /* Set max events in a batch submission */
  batchSize = context.getInteger("batchSize", 1);
  /* Size of events to be generated. */
  int size = context.getInteger("size", 500);

  prepEventData(size);
}
 
Example 9
Source File: KafkaSink.java    From mt-flume with Apache License 2.0
@Override
public void configure(Context context) {
    this.brokerList = context.getString("brokerList");
    Preconditions.checkNotNull(brokerList, "brokerList is required.");
    this.requestRequiredAcks = context.getInteger("requestRequiredAcks", 0);
    this.requestTimeoutms = context.getLong("requestTimeoutms", Long.valueOf(10000));
    this.serializerClass = context.getString("serializerClass", "kafka.serializer.StringEncoder");
    this.partitionerClass = context.getString("partitionerClass", "kafka.producer.DefaultPartitioner");
    this.producerType = context.getString("producerType", "async");
    this.batchNumMessages = context.getInteger("batchNumMessages", 200);
    this.queueBufferingMaxMessages = context.getInteger("queueBufferingMaxMessages", 1000);
    
    this.topicPrefix = context.getString("topicPrefix");
    Preconditions.checkNotNull(topicPrefix, "topicPrefix is required.");
}
 
Example 10
Source File: MultiLineExecSource.java    From flume-plugins with MIT License
@Override
public void configure(Context context) {
	command = context.getString("command");
	eventTerminator = context.getString("event.terminator");
	lineTerminator = context.getString("line.terminator", DEFAULT_LINE_TERMINATOR);

	Preconditions.checkState(command != null, "The parameter command must be specified");
	Preconditions.checkState(lineTerminator != null, "The parameter line.terminator must be specified");

	restartThrottle = context.getLong(CONFIG_RESTART_THROTTLE, DEFAULT_RESTART_THROTTLE);
	restart = context.getBoolean(CONFIG_RESTART, DEFAULT_RESTART);
	logStderr = context.getBoolean(CONFIG_LOG_STDERR, DEFAULT_LOG_STDERR);
	bufferCount = context.getInteger(CONFIG_BATCH_SIZE, DEFAULT_BATCH_SIZE);
	charset = Charset.forName(context.getString(CHARSET, DEFAULT_CHARSET));
}
 
Example 11
Source File: KafkaChannel.java    From flume-plugin with Apache License 2.0
@Override
public void configure(Context ctx) {
    String topicStr = ctx.getString(TOPIC);
    if (topicStr == null || topicStr.isEmpty()) {
        topicStr = DEFAULT_TOPIC;
        LOGGER
                .info("Topic was not specified. Using " + topicStr + " as the topic.");
    }
    topic.set(topicStr);
    String groupId = ctx.getString(GROUP_ID_FLUME);
    if (groupId == null || groupId.isEmpty()) {
        groupId = DEFAULT_GROUP_ID;
        LOGGER.info(
                "Group ID was not specified. Using " + groupId + " as the group id.");
    }
    String brokerList = ctx.getString(BROKER_LIST_FLUME_KEY);
    if (brokerList == null || brokerList.isEmpty()) {
        throw new ConfigurationException("Broker List must be specified");
    }
    String zkConnect = ctx.getString(ZOOKEEPER_CONNECT_FLUME_KEY);
    if (zkConnect == null || zkConnect.isEmpty()) {
        throw new ConfigurationException(
                "Zookeeper Connection must be specified");
    }
    Long timeout = ctx.getLong(TIMEOUT, Long.valueOf(DEFAULT_TIMEOUT));
    kafkaConf.putAll(ctx.getSubProperties(KAFKA_PREFIX));
    kafkaConf.put(GROUP_ID, groupId);
    kafkaConf.put(BROKER_LIST_KEY, brokerList);
    kafkaConf.put(ZOOKEEPER_CONNECT, zkConnect);
    kafkaConf.put(AUTO_COMMIT_ENABLED, String.valueOf(false));
    kafkaConf.put(CONSUMER_TIMEOUT, String.valueOf(timeout));
    kafkaConf.put(REQUIRED_ACKS_KEY, "-1");
    LOGGER.info(kafkaConf.toString());
    parseAsFlumeEvent =
            ctx.getBoolean(PARSE_AS_FLUME_EVENT, DEFAULT_PARSE_AS_FLUME_EVENT);

    boolean readSmallest = ctx.getBoolean(READ_SMALLEST_OFFSET,
            DEFAULT_READ_SMALLEST_OFFSET);
    // If the data is to be parsed as Flume events, we always read the smallest.
    // Else, we read the configuration, which by default reads the largest.
    if (parseAsFlumeEvent || readSmallest) {
        // readSmallest is eval-ed only if parseAsFlumeEvent is false.
        // The default is largest, so we don't need to set it explicitly.
        kafkaConf.put("auto.offset.reset", "smallest");
    }

    if (counter == null) {
        counter = new KafkaChannelCounter(getName());
    }

}
 
Example 12
Source File: FlumeSink.java    From attic-apex-malhar with Apache License 2.0
@Override
public void configure(Context context)
{
  hostname = context.getString(HOSTNAME_STRING, HOSTNAME_DEFAULT);
  port = context.getInteger("port", 0);
  id = context.getString("id");
  if (id == null) {
    id = getName();
  }
  acceptedTolerance = context.getLong("acceptedTolerance", ACCEPTED_TOLERANCE);
  sleepMillis = context.getLong("sleepMillis", 5L);
  throughputAdjustmentFactor = context.getInteger("throughputAdjustmentPercent", 5) / 100.0;
  maximumEventsPerTransaction = context.getInteger("maximumEventsPerTransaction", 10000);
  minimumEventsPerTransaction = context.getInteger("minimumEventsPerTransaction", 100);
  commitEventTimeoutMillis = context.getLong("commitEventTimeoutMillis", Long.MAX_VALUE);

  @SuppressWarnings("unchecked")
  Discovery<byte[]> ldiscovery = configure("discovery", Discovery.class, context);
  if (ldiscovery == null) {
    logger.warn("Discovery agent not configured for the sink!");
    discovery = new Discovery<byte[]>()
    {
      @Override
      public void unadvertise(Service<byte[]> service)
      {
        logger.debug("Sink {} stopped listening on {}:{}", service.getId(), service.getHost(), service.getPort());
      }

      @Override
      public void advertise(Service<byte[]> service)
      {
        logger.debug("Sink {} started listening on {}:{}", service.getId(), service.getHost(), service.getPort());
      }

      @Override
      @SuppressWarnings("unchecked")
      public Collection<Service<byte[]>> discover()
      {
        return Collections.EMPTY_SET;
      }

    };
  } else {
    discovery = ldiscovery;
  }

  storage = configure("storage", Storage.class, context);
  if (storage == null) {
    logger.warn("storage key missing... FlumeSink may lose data!");
    storage = new Storage()
    {
      @Override
      public byte[] store(Slice slice)
      {
        return null;
      }

      @Override
      public byte[] retrieve(byte[] identifier)
      {
        return null;
      }

      @Override
      public byte[] retrieveNext()
      {
        return null;
      }

      @Override
      public void clean(byte[] identifier)
      {
      }

      @Override
      public void flush()
      {
      }

    };
  }

  @SuppressWarnings("unchecked")
  StreamCodec<Event> lCodec = configure("codec", StreamCodec.class, context);
  if (lCodec == null) {
    codec = new EventCodec();
  } else {
    codec = lCodec;
  }

}
 
Example 13
Source File: NGSIMongoBaseSink.java    From fiware-cygnus with GNU Affero General Public License v3.0
@Override
public void configure(Context context) {
    super.configure(context);

    mongoHosts = context.getString("mongo_hosts", "localhost:27017");
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mongo_hosts=" + mongoHosts + ")");
    mongoUsername = context.getString("mongo_username", "");
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mongo_username=" + mongoUsername + ")");
    // FIXME: mongoPassword should be read as a SHA1 and decoded here
    mongoPassword = context.getString("mongo_password", "");
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mongo_password=" + mongoPassword + ")");
    mongoAuthSource = context.getString("mongo_auth_source", "");
    LOGGER.debug("[" + this.getName() + "] Reading configuration (mongo_auth_source=" + mongoAuthSource + ")");

    if (enableEncoding) {
        dbPrefix = NGSICharsets.encodeMongoDBDatabase(context.getString("db_prefix", "sth_"));
    } else {
        dbPrefix = NGSIUtils.encodeSTHDB(context.getString("db_prefix", "sth_"));
    } // if else

    LOGGER.debug("[" + this.getName() + "] Reading configuration (db_prefix=" + dbPrefix + ")");

    if (enableEncoding) {
        collectionPrefix = NGSICharsets.encodeMongoDBCollection(context.getString("collection_prefix", "sth_"));
    } else {
        collectionPrefix = NGSIUtils.encodeSTHCollection(context.getString("collection_prefix", "sth_"));
    } // if else

    if (collectionPrefix.equals("system.")) {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (collection_prefix="
            + collectionPrefix + ") -- Cannot be 'system.'");
    } else {
        LOGGER.debug("[" + this.getName() + "] Reading configuration (collection_prefix=" + collectionPrefix + ")");
    } // if else

    dataExpiration = context.getLong("data_expiration", 0L);
    LOGGER.debug("[" + this.getName() + "] Reading configuration (data_expiration=" + dataExpiration + ")");

    String ignoreWhiteSpacesStr = context.getString("ignore_white_spaces", "true");

    if (ignoreWhiteSpacesStr.equals("true") || ignoreWhiteSpacesStr.equals("false")) {
        ignoreWhiteSpaces = Boolean.valueOf(ignoreWhiteSpacesStr);
        LOGGER.debug("[" + this.getName() + "] Reading configuration (ignore_white_spaces="
            + ignoreWhiteSpacesStr + ")");
    }  else {
        invalidConfiguration = true;
        LOGGER.warn("[" + this.getName() + "] Invalid configuration (ignore_white_spaces="
            + ignoreWhiteSpacesStr + ") -- Must be 'true' or 'false'");
    }  // if else
}
 
Example 14
Source File: HBaseSink.java    From mt-flume with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public void configure(Context context){
  tableName = context.getString(HBaseSinkConfigurationConstants.CONFIG_TABLE);
  String cf = context.getString(
      HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
  batchSize = context.getLong(
      HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
  serializerContext = new Context();
  //If not specified, will use HBase defaults.
  eventSerializerType = context.getString(
      HBaseSinkConfigurationConstants.CONFIG_SERIALIZER);
  Preconditions.checkNotNull(tableName,
      "Table name cannot be empty, please specify in configuration file");
  Preconditions.checkNotNull(cf,
      "Column family cannot be empty, please specify in configuration file");
  //Check for event serializer, if null set event serializer type
  if(eventSerializerType == null || eventSerializerType.isEmpty()) {
    eventSerializerType =
        "org.apache.flume.sink.hbase.SimpleHbaseEventSerializer";
    logger.info("No serializer defined, Will use default");
  }
  serializerContext.putAll(context.getSubProperties(
          HBaseSinkConfigurationConstants.CONFIG_SERIALIZER_PREFIX));
  columnFamily = cf.getBytes(Charsets.UTF_8);
  try {
    Class<? extends HbaseEventSerializer> clazz =
        (Class<? extends HbaseEventSerializer>)
        Class.forName(eventSerializerType);
    serializer = clazz.newInstance();
    serializer.configure(serializerContext);
  } catch (Exception e) {
    logger.error("Could not instantiate event serializer." , e);
    Throwables.propagate(e);
  }
  kerberosKeytab = context.getString(HBaseSinkConfigurationConstants.CONFIG_KEYTAB, "");
  kerberosPrincipal = context.getString(HBaseSinkConfigurationConstants.CONFIG_PRINCIPAL, "");

  enableWal = context.getBoolean(HBaseSinkConfigurationConstants
    .CONFIG_ENABLE_WAL, HBaseSinkConfigurationConstants.DEFAULT_ENABLE_WAL);
  logger.info("The write to WAL option is set to: " + String.valueOf(enableWal));
  if(!enableWal) {
    logger.warn("HBase Sink's enableWal configuration is set to false. All " +
      "writes to HBase will have WAL disabled, and any data in the " +
      "memstore of this region in the Region Server could be lost!");
  }
  sinkCounter = new SinkCounter(this.getName());
}
 
Example 15
Source File: DatasetSink.java    From kite with Apache License 2.0
@Override
public void configure(Context context) {
  // initialize login credentials
  this.login = KerberosUtil.login(
      context.getString(DatasetSinkConstants.AUTH_PRINCIPAL),
      context.getString(DatasetSinkConstants.AUTH_KEYTAB));
  String effectiveUser =
      context.getString(DatasetSinkConstants.AUTH_PROXY_USER);
  if (effectiveUser != null) {
    this.login = KerberosUtil.proxyAs(effectiveUser, login);
  }

  String datasetURI = context.getString(
      DatasetSinkConstants.CONFIG_KITE_DATASET_URI);
  if (datasetURI != null) {
    this.target = URI.create(datasetURI);
    this.datasetName = uriToName(target);
  } else {
    String repositoryURI = context.getString(
        DatasetSinkConstants.CONFIG_KITE_REPO_URI);
    Preconditions.checkNotNull(repositoryURI, "Repository URI is missing");
    this.datasetName = context.getString(
        DatasetSinkConstants.CONFIG_KITE_DATASET_NAME);
    Preconditions.checkNotNull(datasetName, "Dataset name is missing");

    this.target = new URIBuilder(repositoryURI, URIBuilder.NAMESPACE_DEFAULT,
        datasetName).build();
  }

  this.setName(target.toString());

  // other configuration
  this.batchSize = context.getLong(
      DatasetSinkConstants.CONFIG_KITE_BATCH_SIZE,
      DatasetSinkConstants.DEFAULT_BATCH_SIZE);
  this.rollIntervalS = context.getInteger(
      DatasetSinkConstants.CONFIG_KITE_ROLL_INTERVAL,
      DatasetSinkConstants.DEFAULT_ROLL_INTERVAL);

  this.counter = new SinkCounter(datasetName);
}