Python config.get_config() Examples

The following are 22 code examples of config.get_config(), collected from open-source projects. The project and source file each snippet comes from are listed above it. You may also want to check out all available functions/classes of the module config.
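Before the examples, a note on what get_config() typically looks like: several of the projects below build it as a thin argparse wrapper that returns a (config, unparsed) pair, while others return a plain dict assembled from environment variables or a config file. The sketch below is purely illustrative; the flag names, defaults, and the module-level _parser are assumptions, not code from any project listed here.

# Minimal illustrative sketch of an argparse-based config module (hypothetical flags).
import argparse

_parser = argparse.ArgumentParser(description='example configuration')
_parser.add_argument('--batch_size', type=int, default=64, help='mini-batch size')
_parser.add_argument('--gpu', type=str, default='0', help='visible GPU ids')
_parser.add_argument('--is_train', action='store_true', help='run training instead of testing')


def get_config():
    """Parse known command-line flags and return (config, unparsed)."""
    config, unparsed = _parser.parse_known_args()
    return config, unparsed


if __name__ == '__main__':
    config, _ = get_config()
    print(config.batch_size, config.gpu, config.is_train)

Call sites that expect a single object, such as config = get_config(), would simply drop the second return value.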
Example #1
Source File: main.py    From DQN-tensorflow with MIT License
def main(_):
  gpu_options = tf.GPUOptions(
      per_process_gpu_memory_fraction=calc_gpu_fraction(FLAGS.gpu_fraction))

  with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    config = get_config(FLAGS) or FLAGS

    if config.env_type == 'simple':
      env = SimpleGymEnvironment(config)
    else:
      env = GymEnvironment(config)

    if not tf.test.is_gpu_available() and FLAGS.use_gpu:
      raise Exception("use_gpu flag is true when no GPUs are available")

    if not FLAGS.use_gpu:
      config.cnn_format = 'NHWC'

    agent = Agent(config, env, sess)

    if FLAGS.is_train:
      agent.train()
    else:
      agent.play() 
Example #2
Source File: worker.py    From browsertrix with MIT License
def init(browser_type):
    """ Initialize the uwsgi worker which will read urls to archive from redis queue
    and use associated web driver to connect to remote web browser
    """
    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=logging.DEBUG)
    logging.debug('WebDriver Worker Started')

    config = get_config()

    archives = config['archives']

    rc = init_redis(config)

    browser = get_avail_browser(config, rc, browser_type)

    run(rc, browser, archives, config, browser_type) 
Example #3
Source File: synthia.py    From SpatioTemporalSegmentation with MIT License
def test(self):
    from torch.utils.data import DataLoader
    from lib.utils import Timer
    from config import get_config
    config = get_config()

    dataset = SynthiaVoxelizationDataset(config)
    timer = Timer()

    data_loader = DataLoader(
        dataset=dataset,
        collate_fn=cfl_collate_fn_factory(limit_numpoints=False),
        num_workers=0,
        batch_size=4,
        shuffle=True)

    # Start from index 1
    # for i, batch in enumerate(data_loader, 1):
    data_iter = iter(data_loader)  # use the builtin iter(); avoid shadowing it with a variable name
    for i in range(100):
      timer.tic()
      batch = next(data_iter)  # Python 3: next(iterator) rather than iterator.next()
      print(batch, timer.toc()) 
Example #4
Source File: scheduler.py    From destalinator with Apache License 2.0
def destalinate_job():
    raven_client = RavenClient()

    logging.info("Destalinating")
    if not get_config().sb_token or not get_config().api_token:
        logging.error(
            "Missing at least one required Slack environment variable.\n"
            "Make sure to set DESTALINATOR_SB_TOKEN and DESTALINATOR_API_TOKEN."
        )
    else:
        try:
            archiver.Archiver().archive()
            warner.Warner().warn()
            announcer.Announcer().announce()
            flagger.Flagger().flag()
            logging.info("OK: destalinated")
        except Exception as e:  # pylint: disable=W0703
            raven_client.captureException()
            if not get_config().sentry_dsn:
                raise e
    logging.info("END: destalinate_job") 
Example #5
Source File: towel_mode.py    From vldc-bot with MIT License
def add_towel_mode(upd: Updater, handlers_group: int):
    logger.info("registering towel-mode handlers")
    dp = upd.dispatcher

    # catch all new users and drop the towel
    dp.add_handler(MessageHandler(Filters.status_update.new_chat_members, catch_new_user),
                   handlers_group)

    # check for reply or remove messages
    dp.add_handler(MessageHandler(
        Filters.group & ~Filters.status_update, catch_reply),
        handlers_group
    )

    # "i am a bot button"
    dp.add_handler(CallbackQueryHandler(i_am_a_bot_btn), handlers_group)

    # ban quarantined users once the quarantine time is up
    upd.job_queue.run_repeating(ban_user, interval=60, first=60, context={
        "chat_id": get_config()["GROUP_CHAT_ID"]
    }) 
Example #6
Source File: gui_utils.py    From hashmal with GNU General Public License v3.0
def __init__(self, parent=None):
        super(OutputAmountEdit, self).__init__(parent)
        self.config = config.get_config()
        self.config.optionChanged.connect(self.on_option_changed)
        self.amount_format = self.config.get_option('amount_format', 'coins')
        self.textChanged.connect(self.check_text) 
Example #7
Source File: mocks.py    From destalinator with Apache License 2.0
def mocked_slacker_object(channels_list=None, users_list=None, messages_list=None, emoji_list=None):
    slacker_obj = slacker.Slacker(get_config().slack_name, token='token', init=False)

    slacker_obj.get_all_channel_objects = mock.MagicMock(return_value=channels_list or [])
    slacker_obj.get_channels()

    slacker_obj.get_all_user_objects = mock.MagicMock(return_value=users_list or [])
    slacker_obj.get_users()

    slacker_obj.get_messages_in_time_range = mock.MagicMock(return_value=messages_list or [])
    slacker_obj.get_emojis = mock.MagicMock(return_value=emoji_list or [])

    return slacker_obj 
Example #8
Source File: test_config.py    From destalinator with Apache License 2.0
def test_environment_variable_configs(self):
        self.assertEqual(get_config().string_variable, 'test')
        self.assertListEqual(get_config().list_variable, ['test']) 
Example #9
Source File: slack_logging.py    From destalinator with Apache License 2.0
def set_up_slack_logger(slackbot=None):
    """
    Sets up a handler and formatter on a given `logging.Logger` object.

    * `log_level_env_var` - Grabs logging level from this ENV var. Possible values are standard: "debug", "error", etc.
    * `log_to_slack_env_var` - Points to an ENV var that indicates whether to log to a Slack channel.
    * `log_channel` - Indicates the name of the Slack channel to which we'll send logs.
    * `default_level` - The default log level if one is not set in the environment.
    * `slackbot` - A slackbot.Slackbot() object ready to send messages to a Slack channel.
    """
    logger = logging.getLogger()

    if logger.handlers:
        # We've likely already run through the rest of this method:
        return

    _config = get_config()

    slack_log_level = getattr(logging, _config.log_level.upper(), logging.INFO)

    formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')

    logger.setLevel(logging.DEBUG)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)

    logger.addHandler(stream_handler)

    if _config.log_to_channel and _config.log_channel and slackbot:
        logger.debug("Logging to slack channel: %s", _config.log_channel)
        slack_handler = SlackHandler(slackbot=slackbot, level=slack_log_level)
        slack_handler.setFormatter(formatter)
        logger.addHandler(slack_handler) 
Example #10
Source File: scheduler.py    From destalinator with Apache License 2.0
def main():
    # Use RUN_ONCE to only run the destalinate job once immediately
    if get_config().run_once:
        destalinate_job()
    else:
        schedule_job() 
Example #11
Source File: scheduler.py    From destalinator with Apache License 2.0
def schedule_job():
    # When testing changes, set the "TEST_SCHEDULE" envvar to run more often
    if get_config().test_schedule:
        schedule_kwargs = {"hour": "*", "minute": "*/10"}
    else:
        schedule_kwargs = {"hour": get_config().schedule_hour}

    sched = BlockingScheduler()
    sched.add_job(destalinate_job, "cron", **schedule_kwargs)
    sched.start() 
Example #12
Source File: main.py    From ALISTA with MIT License
def main ():
    # parse configuration
    config, _ = get_config()
    # set visible GPUs
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu

    if config.test:
        run_test (config)
    else:
        run_train (config)
    # end of main 
Example #13
Source File: test.py    From DGCNN with MIT License
def main():
    testing_file = "./new_data/test.ann.json"
    trained_model = "./checkpoints/model.ckpt"
    embedding_file = "D:/DataMining/QASystem/wiki/wiki.zh.text.vector"
    # embedding_file = "./wiki.zh.text.vector"
    embedding_size = 60  # Word embedding dimension
    batch_size = 64  # Batch data size
    sequence_length = 150  # Sentence length
    learning_rate = 0.01
    gpu_mem_usage = 0.75
    gpu_device = "/gpu:0"
    cpu_device = "/cpu:0"

    config = get_config()  # Not used yet
    embeddings, word2idx = load_embedding(embedding_file)
    questions, evidences, y1, y2 = load_data(testing_file, word2idx, sequence_length)
    with tf.Graph().as_default(), tf.device(gpu_device):
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_usage)
        session_conf = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
        with tf.variable_scope('Model'):
            model = DGCNN(config, embeddings, sequence_length, embedding_size)
            with tf.Session(config=session_conf).as_default() as sess:
                saver = tf.train.Saver()
                print("Start loading the model")
                saver.restore(sess, trained_model)
                print("The model is loaded")
                acc1, acc2 = [], []
                for batch_questions, batch_evidences, batch_y1, batch_y2 in next_batch(questions, evidences, y1, y2, batch_size):
                    feed_dict = {
                        model.e: batch_evidences,
                        model.q: batch_questions,
                        model.y1: batch_y1,
                        model.y2: batch_y2,
                        model.is_train: False
                    }
                    acc1_, acc2_ = sess.run([model.acc1, model.acc2], feed_dict)
                    acc1.append(acc1_)
                    acc2.append(acc2_)
                    print('Acc1 %2.3f\tAcc2 %2.3f' % (acc1_, acc2_))
                print('Average: Acc1 %2.3f\tAcc2 %2.3f' % (np.mean(acc1), np.mean(acc2))) 
Example #14
Source File: predict.py    From ecg-mit-bih with GNU General Public License v3.0
def predictByPart(data, peaks):
    classesM = ['N','Ventricular','Paced','A','F','Noise']#,'L','R','f','j','E','a','J','Q','e','S']
    predicted = list()
    result = ""
    counter = [0]* len(classesM)
    from keras.models import load_model
    model = load_model('models/MLII-latest.hdf5')
    config = get_config() 
    for i, peak in enumerate(peaks[3:-1]):
      total_n =len(peaks)
      start, end =  peak-config.input_size//2 , peak+config.input_size//2
      prob = model.predict(data[:, start:end])
      prob = prob[:,0]
      ann = np.argmax(prob)
      counter[ann]+=1
      if classesM[ann] != "N":
        print("The {}/{}-record classified as {} with {:3.1f}% certainty".format(i,total_n,classesM[ann],100*prob[0,ann]))
      result += "("+ classesM[ann] +":" + str(round(100*prob[0,ann],1)) + "%)"
      predicted.append([classesM[ann],prob])
      if classesM[ann] != 'N' and prob[0,ann] > 0.95:
        import matplotlib.pyplot as plt
        plt.plot(data[:, start:end][0,:,0],)
        mkdir_recursive('results')
        plt.savefig('results/hazard-'+classesM[ann]+'.png', format="png", dpi = 300)
        plt.close()
    result += "{}-N, {}-Venticular, {}-Paced, {}-A, {}-F, {}-Noise".format(counter[0], counter[1], counter[2], counter[3], counter[4], counter[5])
    return predicted, result 
Example #15
Source File: main.py    From RL-Restore with MIT License
def main(_):
    with tf.Session() as sess:
        config = get_config(FLAGS)
        env = MyEnvironment(config)
        agent = Agent(config, env, sess)

        if FLAGS.is_train:
            agent.train()
        else:
            if FLAGS.dataset == 'mine':
                agent.play_mine()
            else:
                agent.play() 
Example #16
Source File: gui_utils.py    From hashmal with GNU General Public License v3.0
def __init__(self, satoshis=0):
        super(Amount, self).__init__()
        self.satoshis = satoshis
        self.config = config.get_config()
        self.fmt = self.config.get_option('amount_format', 'satoshis') 
Example #17
Source File: get_conf_from_env_test.py    From vldc-bot with MIT License
def test_get_config(self):
        c: Dict = get_config()
        self.assertEqual(c["DEBUG"], self.env_debug)
        self.assertEqual(c["GROUP_CHAT_ID"], self.env_chat_id)
        self.assertEqual(c["TOKEN"], self.env_token)
        self.assertEqual(c["MONGO_USER"], self.env_mongo_initdb_root_username)
        self.assertEqual(c["MONGO_PASS"], self.env_mongo_initdb_root_password) 
Example #18
Source File: app.py    From browsertrix with MIT License
def init():
    """ Init the application and add routes """

    logging.basicConfig(format='%(asctime)s: [%(levelname)s]: %(message)s',
                        level=logging.DEBUG)

    global theconfig
    theconfig = get_config()

    global rc
    rc = init_redis(theconfig)

    app = default_app()

    return app 
Example #19
Source File: db_sqlite.py    From audio-fingerprint-identifying-python with MIT License
def connect(self):
    config = get_config()

    self.conn = sqlite3.connect(config['db.file'])
    self.conn.text_factory = str

    self.cur = self.conn.cursor()

    print(colored('sqlite - connection opened','white',attrs=['dark'])) 
Example #20
Source File: db_mongo.py    From audio-fingerprint-identifying-python with MIT License
def connect(self):
    config = get_config()

    self.client = MongoClient(config['db.dsn'])
    self.db = self.client[config['db.database']] 
Example #21
Source File: main.py    From CausalGAN with MIT License
def get_model(config=None):
    if config is None:  # parse a fresh configuration only when none is supplied
        config, unparsed = get_config()
    return get_trainer(config) 
Example #22
Source File: main.py    From CausalGAN with MIT License
def get_trainer():
    print('tf: resetting default graph!')
    tf.reset_default_graph()#for repeated calls in ipython


    ####GET CONFIGURATION####
    #TODO:load configurations from previous model when loading previous model
    ##if load_path:
        #load config files from dir
    #except if pt_load_path, get cc_config from before
    #overwrite is_train, is_pretrain with current args--sort of a mess

    ##else:
    config,_=get_config()
    cc_config,_=get_cc_config()
    dcgan_config,_=get_dcgan_config()
    began_config,_=get_began_config()

    ###SEEDS###
    np.random.seed(config.seed)
    #tf.set_random_seed(config.seed) # Not working right now.

    prepare_dirs_and_logger(config)
    if not config.load_path:
        print('saving config because load path not given')
        save_configs(config,cc_config,dcgan_config,began_config)

    #Resolve model differences and batch_size
    if config.model_type:
        if config.model_type=='dcgan':
            config.batch_size=dcgan_config.batch_size
            cc_config.batch_size=dcgan_config.batch_size # make sure the batch size of cc is the same as the image model
            config.Model=CausalGAN.CausalGAN
            model_config=dcgan_config
        if config.model_type=='began':
            config.batch_size=began_config.batch_size
            cc_config.batch_size=began_config.batch_size # make sure the batch size of cc is the same as the image model
            config.Model=CausalBEGAN.CausalBEGAN
            model_config=began_config

    else:#no image model
        model_config=None
        config.batch_size=cc_config.batch_size

        if began_config.is_train or dcgan_config.is_train:
            raise ValueError('need to specify model_type for is_train=True')

    #Interpret causal_model keyword
    cc_config.graph=get_causal_graph(config.causal_model)

    #Builds and loads specified models:
    trainer=Trainer(config,cc_config,model_config)
    return trainer