Python random.seed() Examples

The following are code examples showing how to use random.seed(). They are drawn from open source Python projects.
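Before the project examples, here is a minimal sketch of the core behavior: seeding with a fixed value makes the generator's output reproducible, while calling random.seed() with no argument reseeds from an operating-system entropy source.

import random

# Seeding with a fixed value makes the sequence reproducible.
random.seed(42)
first = [random.randint(0, 9) for _ in range(5)]

random.seed(42)
second = [random.randint(0, 9) for _ in range(5)]
assert first == second  # identical sequences

# With no argument, the generator is reseeded from os.urandom
# (falling back to a time-based seed if that is unavailable).
random.seed()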

Example 1
Project: pyblish-win   Author: pyblish   File: sortperf.py    GNU Lesser General Public License v3.0
def main():
    """Main program when invoked as a script.

    One argument: tabulate a single row.
    Two arguments: tabulate a range (inclusive).
    Extra arguments are used to seed the random generator.

    """
    # default range (inclusive)
    k1 = 15
    k2 = 20
    if sys.argv[1:]:
        # one argument: single point
        k1 = k2 = int(sys.argv[1])
        if sys.argv[2:]:
            # two arguments: specify range
            k2 = int(sys.argv[2])
            if sys.argv[3:]:
                # derive random seed from remaining arguments
                x = 1
                for a in sys.argv[3:]:
                    x = 69069 * x + hash(a)
                random.seed(x)
    r = range(k1, k2+1)                 # include the end point
    tabulate(r) 
Example 2
Project: Automated-Social-Annotation   Author: acadTags   File: SVM.py    MIT License
def display_for_qualitative_evaluation(modelToEval, evalX_embedded, evalX,evalY,vocabulary_index2word,vocabulary_index2word_label):
    prediction_str=""
    #generate the same doc indexes as for the deep learning models.
    number_examples=len(evalY)
    rn_dict={}
    rn.seed(1) # set the seed to produce same documents for prediction
    batch_size=128
    for i in range(0,500):
        batch_chosen=rn.randint(0,number_examples//batch_size)
        x_chosen=rn.randint(0,batch_size)
        #rn_dict[(batch_chosen*batch_size,x_chosen)]=1
        rn_dict[batch_chosen*batch_size+x_chosen]=1
        
    y_pred = modelToEval.predict(evalX_embedded)
    y_true = np.asarray(evalY)    
    for i in range(len(y_pred)):
        label_predicted = np.where(y_pred[i]==1)[0]
        if rn_dict.get(i) == 1:
            doc = 'doc: ' + ' '.join(display_results(evalX[i],vocabulary_index2word))
            pred = 'prediction-svm: ' + ' '.join(display_results(label_predicted,vocabulary_index2word_label))
            get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
            label = 'labels: ' + ' '.join(display_results(get_indexes(1,evalY[i]),vocabulary_index2word_label))
            prediction_str = prediction_str + '\n' + doc + '\n' + pred + '\n' + label + '\n'
    
    return prediction_str 
Example 3
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License
def initialize_session(logdir, seed=None):
    """Create a session and saver initialized from a checkpoint if found."""
    if seed != 0:
        numpy.random.seed(seed=seed)

    config = tf.ConfigProto()
    # config.gpu_options.allow_growth = True
    logdir = os.path.expanduser(logdir)
    checkpoint = tf.train.latest_checkpoint(logdir)
    saver = tf.train.Saver()
    with tf.Session(config=config) as sess:
        if checkpoint:
            print('Load checkpoint {}.'.format(checkpoint))
            saver.restore(sess, checkpoint)
        else:
            print('Initialize new model.')
            os.makedirs(logdir, exist_ok=True)
            sess.run(tf.global_variables_initializer())
        yield sess, saver 
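The trailing yield suggests this function is wrapped with contextlib.contextmanager elsewhere in the project; under that assumption, a hedged usage sketch:

# Hypothetical usage, assuming initialize_session is decorated with
# @contextlib.contextmanager (the yield statement suggests it).
with initialize_session('~/logdir', seed=1234) as (sess, saver):
    pass  # run training steps with sess, checkpoint with saver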
Example 4
Project: mmdetection   Author: open-mmlab   File: train.py    Apache License 2.0
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False 
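A typical call, for illustration: deterministic mode trades speed for reproducibility, because disabling torch.backends.cudnn.benchmark stops cuDNN from auto-tuning convolution algorithms.

# Reproducible (but potentially slower) training run.
set_random_seed(42, deterministic=True)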
Example 5
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def train_valid_split(dataset, test_size=0.25, shuffle=False, random_seed=0):
    """ Return a list of splitted indices from a DataSet.
    Indices can be used with DataLoader to build a train and validation set.

    Arguments:
        A Dataset
        A test_size, as a float between 0 and 1 (percentage split) or as an int (fixed number split)
        Shuffling True or False
        Random seed
    """
    length = len(dataset)
    indices = list(range(length))  # cover the full dataset, including index 0

    if shuffle:
        random.seed(random_seed)
        random.shuffle(indices)

    if type(test_size) is float:
        split = floor(test_size * length)
    elif type(test_size) is int:
        split = test_size
    else:
        raise ValueError('test_size should be an int or a float, got %s'
                         % type(test_size))
    return indices[split:], indices[:split] 
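As the docstring notes, the returned index lists plug directly into PyTorch data loading; a sketch of one common pattern, where `dataset` stands in for any map-style Dataset already in scope:

from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler

train_idx, valid_idx = train_valid_split(dataset, test_size=0.25,
                                         shuffle=True, random_seed=0)
train_loader = DataLoader(dataset, batch_size=64,
                          sampler=SubsetRandomSampler(train_idx))
valid_loader = DataLoader(dataset, batch_size=64,
                          sampler=SubsetRandomSampler(valid_idx))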
Example 6
Project: neural-fingerprinting   Author: StephanZheng   File: validate_submission.py    BSD 3-Clause "New" or "Revised" License
def main(args):
  print_in_box('Validating submission ' + args.submission_filename)
  random.seed()
  temp_dir = args.temp_dir
  delete_temp_dir = False
  if not temp_dir:
    temp_dir = tempfile.mkdtemp()
    logging.info('Created temporary directory: %s', temp_dir)
    delete_temp_dir = True
  validator = validate_submission_lib.SubmissionValidator(temp_dir,
                                                          args.use_gpu)
  if validator.validate_submission(args.submission_filename,
                                   args.submission_type):
    print_in_box('Submission is VALID!')
  else:
    print_in_box('Submission is INVALID, see log messages for details')
  if delete_temp_dir:
    logging.info('Deleting temporary directory: %s', temp_dir)
    subprocess.call(['rm', '-rf', temp_dir]) 
Example 7
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: linode.py    MIT License
def randompass():
    '''
    Generate a long random password that complies with Linode requirements
    '''
    # Linode API currently requires the following:
    # It must contain at least two of these four character classes:
    # lower case letters - upper case letters - numbers - punctuation
    # we play it safe :)
    import random
    import string
    # as of python 2.4, this reseeds the PRNG from urandom
    random.seed()
    lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
    upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
    number = ''.join(random.choice(string.digits) for x in range(6))
    punct = ''.join(random.choice(string.punctuation) for x in range(6))
    p = lower + upper + number + punct
    return ''.join(random.sample(p, len(p))) 
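Note that the random module is not suitable for security-sensitive values; on Python 3.6+ the standard-library secrets module is the usual choice for passwords. A minimal sketch of the same idea using a CSPRNG (secrets deliberately offers no seed function):

import secrets
import string

def randompass_secrets(length=24):
    """Password with at least one character from each of the four classes."""
    classes = (string.ascii_lowercase, string.ascii_uppercase,
               string.digits, string.punctuation)
    chars = [secrets.choice(c) for c in classes]
    alphabet = ''.join(classes)
    chars += [secrets.choice(alphabet) for _ in range(length - len(chars))]
    secrets.SystemRandom().shuffle(chars)  # cryptographically strong shuffle
    return ''.join(chars)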
Example 8
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_error(self, client, request):
        test_func = ModelAioHttpJobs._test
        def fin():
            ModelAioHttpJobs._test = test_func
        request.addfinalizer(fin)

        async def callback(*args, **kwargs):
            raise Exception('test')

        ModelAioHttpJobs._test = callback
        random.seed(0)
        client = await client
        await client.post( '/')

        query = {'job_hash': 'e3e70682c2094cac629f6fbed82c07cd'}
        while True:
            resp = await client.get( '/', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == {'message': 'test', 'name': 'Exception'}
        assert (await resp.json())['status'] == 'error' 
Example 9
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_error(self, client, request):
        test_func = ModelAioHttpJobs._sync_test
        def fin():
            ModelAioHttpJobs._sync_test = test_func
        request.addfinalizer(fin)

        def callback(*args, **kwargs):
            raise Exception('test')

        ModelAioHttpJobs._sync_test = callback
        random.seed(0)
        client = await client
        await client.post( '/sync')

        query = {'job_hash': 'e3e70682c2094cac629f6fbed82c07cd'}
        while True:
            resp = await client.get( '/sync', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == {'message': 'test', 'name': 'Exception'}
        assert (await resp.json())['status'] == 'error' 
Example 10
Project: FCOS_GluonCV   Author: DetectionTeamUCAS   File: random.py    Apache License 2.0
def seed(a=None):
    """Seed the generator for python builtin random, numpy.random, mxnet.random.

    This method is to control random state for mxnet related random functions.

    Note that this function cannot guarantee 100 percent reproducibility due to
    hardware settings.

    Parameters
    ----------
    a : int or 1-d array_like, optional
        Initialize internal state of the random number generator.
        If `a` is neither None nor an int, then hash(a) is used instead.
        Note that the hash values for some types are nondeterministic.

    """
    pyrandom.seed(a)
    np.random.seed(a)
    mx.random.seed(a) 
Example 11
Project: pepperon.ai   Author: JonWiggins   File: utils.py    MIT License
def random_unit_vector(dimensions, seed=None):
    """
    Returns a random unit vector in the given number of dimensions.
    Created using Gaussian random variables (Box-Muller transform).

    :param dimensions: desired dimensions
    :param seed: nullable, random seed

    :return: random unit vector
    """
    raw = []
    magnitude = 0
    if seed is not None:  # a seed of 0 is valid too
        random.seed(seed)
        
    for count in range(dimensions):
        uniform1 = random.uniform(0, 1)
        uniform2 = random.uniform(0, 1)
        toadd = math.sqrt(-2 * math.log(uniform1)) * math.cos(2 * math.pi * uniform2)
        magnitude += (toadd ** 2)
        raw.append(toadd)
    
    magnitude = math.sqrt(magnitude)
    return [element / magnitude for element in raw] 
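The loop hand-rolls the Box-Muller transform; the standard library exposes the same Gaussian sampling directly via random.gauss, so an equivalent sketch (a hypothetical variant, not from the project):

import math
import random

def random_unit_vector_gauss(dimensions, seed=None):
    # random.gauss draws from N(0, 1); normalizing a vector of Gaussian
    # draws gives a uniformly distributed direction on the unit sphere.
    if seed is not None:
        random.seed(seed)
    raw = [random.gauss(0, 1) for _ in range(dimensions)]
    magnitude = math.sqrt(sum(x * x for x in raw))
    return [x / magnitude for x in raw]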
Example 12
Project: robosuite   Author: StanfordVL   File: demo_sampler_wrapper.py    MIT License
def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method() 
Example 13
Project: tpu_pretrain   Author: allenai   File: utils.py    Apache License 2.0
def init(args):
    # init logger
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is not None and args.log_file != "":
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    else:
        logging.basicConfig(level=logging.INFO, format=log_format)

    # create output dir
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    assert 'bert' in args.output_dir.name, \
        '''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''

    args.output_dir.mkdir(parents=True, exist_ok=True)

    # set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed) 
Example 14
Project: cloud-profiler-python   Author: GoogleCloudPlatform   File: backoff.py    Apache License 2.0
def __init__(self,
               min_envelope_sec=60.0,
               max_envelope_sec=3600.0,
               multiplier=1.3):
    """Constructs a Backoff object.

    Args:
      min_envelope_sec: A float specifying the initial minimum backoff duration
        envelope in seconds.
      max_envelope_sec: A float specifying the maximum backoff duration envelope
        in seconds.
      multiplier: A float specifying the factor for exponential increase.
    """
    random.seed()
    self._min_envelope_sec = min_envelope_sec
    self._max_envelope_sec = max_envelope_sec
    self._multiplier = multiplier
    self._current_envelope_sec = min_envelope_sec 
Example 15
Project: cs294-112_hws   Author: xuwd11   File: run_dqn_atari.py    MIT License
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('exp_name', type=str)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--double_q', action='store_true')
    parser.add_argument('--gpu', type=int, default=0)
    args = parser.parse_args()
    
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    
    if not(os.path.exists('data')):
        os.makedirs('data')
    
    # Get Atari games.
    task = gym.make('PongNoFrameskip-v4')

    # Run training
    seed = random.randint(0, 9999)
    print('random seed = %d' % seed)
    env = get_env(task, seed)
    session = get_session()
    atari_learn(env, session, args, num_timesteps=5e7) 
Example 16
Project: cs294-112_hws   Author: xuwd11   File: run_dqn_lander.py    MIT License
def lander_learn(env,
                 session,
                 num_timesteps,
                 seed):

    optimizer = lander_optimizer()
    stopping_criterion = lander_stopping_criterion(num_timesteps)
    exploration_schedule = lander_exploration_schedule(num_timesteps)

    dqn.learn(
        env=env,
        session=session,
        exploration=lander_exploration_schedule(num_timesteps),
        stopping_criterion=lander_stopping_criterion(num_timesteps),
        double_q=True,
        **lander_kwargs()
    )
    env.close() 
Example 17
Project: mutatest   Author: EvanKepner   File: run.py    MIT License
def get_sample(ggrp: GenomeGroup, ignore_coverage: bool) -> List[GenomeGroupTarget]:
    """Get the sample space for the mutation trials.

    This will attempt to use covered-targets as the default unless ``ignore_coverage`` is set
    to True. If the set .coverage file is not found then the total targets are returned instead.

    Args:
        ggrp: the Genome Group to generate the sample space of targets
        ignore_coverage: flag to ignore coverage if present

    Returns:
        Sorted list of Path-LocIndex pairs as complete sample space from the ``GenomeGroup``.
    """
    if ignore_coverage:
        LOGGER.info("Ignoring coverage file for sample space creation.")

    try:
        sample = ggrp.targets if ignore_coverage else ggrp.covered_targets

    except FileNotFoundError:
        LOGGER.info("Coverage file does not exist, proceeding to sample from all targets.")
        sample = ggrp.targets

    # sorted list used for repeat trials using random seed instead of set
    sort_by_keys = attrgetter(
        "source_path",
        "loc_idx.lineno",
        "loc_idx.col_offset",
        "loc_idx.end_lineno",
        "loc_idx.end_col_offset",
    )
    return sorted(sample, key=sort_by_keys) 
Example 18
Project: meta-transfer-learning   Author: erfaneshrati   File: run_miniimagenet.py    MIT License
def main():
    """
    Load data and train a model on it.
    """
    args = argument_parser().parse_args()
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)

    random.seed(args.seed)

    train_set, val_set, test_set = read_dataset(DATA_DIR)
    if args.metatransfer:
        model = MiniImageNetMetaTransferModel(args.classes, **model_kwargs(args))
    else:
        model = MiniImageNetModel(args.classes, **model_kwargs(args))
    config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        if not args.pretrained:
            print('Training...')
            train(sess, model, train_set, test_set, args.checkpoint, **train_kwargs(args))
        else:
            print('Restoring from checkpoint...')
            tf.train.Saver().restore(sess, tf.train.latest_checkpoint(args.checkpoint))

        print('Evaluating...')
        eval_kwargs = evaluate_kwargs(args)
#        print('Train accuracy: ' + str(evaluate(sess, model, train_set, **eval_kwargs)))
#        print('Validation accuracy: ' + str(evaluate(sess, model, val_set, **eval_kwargs)))
        print('Test accuracy: ' + str(evaluate(sess, model, test_set, **eval_kwargs))) 
Example 19
Project: pyblish-win   Author: pyblish   File: forking.py    GNU Lesser General Public License v3.0
def __init__(self, process_obj):
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None

            self.pid = os.fork()
            if self.pid == 0:
                if 'random' in sys.modules:
                    import random
                    random.seed()
                code = process_obj._bootstrap()
                sys.stdout.flush()
                sys.stderr.flush()
                os._exit(code) 
Example 20
Project: cyberdisc-bot   Author: CyberDiscovery   File: cyber.py    MIT License
def generatebase64(seed: int) -> str:
    random.seed(seed)
    letters = string.ascii_letters + string.digits + "+/="
    return "".join(random.choices(letters, k=20)) 
Example 21
Project: Automated-Social-Annotation   Author: acadTags   File: BiGRU_train.py    MIT License
def display_for_qualitative_evaluation(sess,modelToEval,label_sim_mat,label_sub_mat,evalX,evalY,batch_size,vocabulary_index2word,vocabulary_index2word_label,threshold=0.5):
    prediction_str=""
    number_examples=len(evalX)
    rn_dict={}
    rn.seed(1) # set the seed to produce same documents for prediction
    for i in range(0,500):
        batch_chosen=rn.randint(0,number_examples//batch_size)
        x_chosen=rn.randint(0,batch_size)
        rn_dict[(batch_chosen*batch_size,x_chosen)]=1
    for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):
        feed_dict = {modelToEval.input_x: evalX[start:end], modelToEval.dropout_keep_prob: 1, modelToEval.label_sim_matrix:label_sim_mat, modelToEval.label_sub_matrix:label_sub_mat}
        #if (start==0):
        #    print(evalX[start:end])
        if not FLAGS.multi_label_flag:
            feed_dict[modelToEval.input_y] = evalY[start:end]
        else:
            feed_dict[modelToEval.input_y_multilabel] = evalY[start:end]
        #curr_eval_loss, logits,curr_eval_acc= sess.run([modelToEval.loss_val,modelToEval.logits,modelToEval.accuracy],feed_dict)#curr_eval_acc--->modelToEval.accuracy
        curr_eval_loss,logits= sess.run([modelToEval.loss_val,modelToEval.logits],feed_dict)#curr_eval_acc--->modelToEval.accuracy
        for x in range(0,len(logits)):
            label_list_th = get_label_using_logits_threshold(logits[x],threshold)
            #label_list_topk = get_label_using_logits(logits[x], vocabulary_index2word_label,top_number=11)
            # display a particular prediction result
            #if x==x_chosen and start==batch_chosen*batch_size:
            if rn_dict.get((start,x)) == 1:
                # print('doc:',*display_results(evalX[start+x],vocabulary_index2word))
                # print('prediction-0.5:',*display_results(label_list_th,vocabulary_index2word_label))
                # #print('prediction-topk:',*display_results(label_list_topk,vocabulary_index2word_label))
                # get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
                # print('labels:',*display_results(get_indexes(1,evalY[start+x]),vocabulary_index2word_label))
                doc = 'doc: ' + ' '.join(display_results(evalX[start+x],vocabulary_index2word))
                pred = 'prediction-0.5: ' + ' '.join(display_results(label_list_th,vocabulary_index2word_label))
                #print('prediction-topk:',*display_results(label_list_topk,vocabulary_index2word_label))
                get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
                label = 'labels: ' + ' '.join(display_results(get_indexes(1,evalY[start+x]),vocabulary_index2word_label))
                prediction_str = prediction_str + '\n' + doc + '\n' + pred + '\n' + label + '\n'
                #print(prediction_str)
    return prediction_str
    
# get the top five labels from the logits
Example 22
Project: Automated-Social-Annotation   Author: acadTags   File: HAN_train.py    MIT License
def display_for_qualitative_evaluation(sess,modelToEval,label_sim_mat,label_sub_mat,evalX,evalY,batch_size,vocabulary_index2word,vocabulary_index2word_label,threshold=0.5):
    prediction_str=""
    number_examples=len(evalX)
    rn_dict={}
    rn.seed(1) # set the seed to produce same documents for prediction
    for i in range(0,500):
        batch_chosen=rn.randint(0,number_examples//batch_size)
        x_chosen=rn.randint(0,batch_size)
        rn_dict[(batch_chosen*batch_size,x_chosen)]=1
    for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):
        feed_dict = {modelToEval.input_x: evalX[start:end], modelToEval.dropout_keep_prob: 1, modelToEval.label_sim_matrix:label_sim_mat, modelToEval.label_sub_matrix:label_sub_mat}
        #if (start==0):
        #    print(evalX[start:end])
        if not FLAGS.multi_label_flag:
            feed_dict[modelToEval.input_y] = evalY[start:end]
        else:
            feed_dict[modelToEval.input_y_multilabel] = evalY[start:end]
        #curr_eval_loss, logits,curr_eval_acc= sess.run([modelToEval.loss_val,modelToEval.logits,modelToEval.accuracy],feed_dict)#curr_eval_acc--->modelToEval.accuracy
        curr_eval_loss,logits= sess.run([modelToEval.loss_val,modelToEval.logits],feed_dict)#curr_eval_acc--->modelToEval.accuracy
        for x in range(0,len(logits)):
            label_list_th = get_label_using_logits_threshold(logits[x],threshold)
            #label_list_topk = get_label_using_logits(logits[x], vocabulary_index2word_label,top_number=11)
            # display a particular prediction result
            #if x==x_chosen and start==batch_chosen*batch_size:
            if rn_dict.get((start,x)) == 1:
                # print('doc:',*display_results(evalX[start+x],vocabulary_index2word))
                # print('prediction-0.5:',*display_results(label_list_th,vocabulary_index2word_label))
                # #print('prediction-topk:',*display_results(label_list_topk,vocabulary_index2word_label))
                # get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
                # print('labels:',*display_results(get_indexes(1,evalY[start+x]),vocabulary_index2word_label))
                doc = 'doc: ' + ' '.join(display_results(evalX[start+x],vocabulary_index2word))
                pred = 'prediction-0.5: ' + ' '.join(display_results(label_list_th,vocabulary_index2word_label))
                #print('prediction-topk:',*display_results(label_list_topk,vocabulary_index2word_label))
                get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
                label = 'labels: ' + ' '.join(display_results(get_indexes(1,evalY[start+x]),vocabulary_index2word_label))
                prediction_str = prediction_str + '\n' + doc + '\n' + pred + '\n' + label + '\n'
                #print(prediction_str)
    return prediction_str
    
# get the top five labels from the logits
Example 23
Project: Automated-Social-Annotation   Author: acadTags   File: LDA.py    MIT License
def display_for_qualitative_evaluation(modelToEval, k_num_doc, mat_train, trainY, corpus_eval, evalX, evalY, vocabulary_index2word, vocabulary_index2word_label, hamming_q=FLAGS.ave_labels_per_doc):
    prediction_str=""
    #generate the same doc indexes as for the deep learning models.
    number_examples=len(evalY)
    rn_dict={}
    rn.seed(1) # set the seed to produce same documents for prediction
    batch_size=128
    for i in range(0,500):
        batch_chosen=rn.randint(0,number_examples//batch_size)
        x_chosen=rn.randint(0,batch_size)
        #rn_dict[(batch_chosen*batch_size,x_chosen)]=1
        rn_dict[batch_chosen*batch_size+x_chosen]=1
        
    # get eval-train document similarity matrix
    #mat_train = np.array(modelToEval[corpus]) #https://stackoverflow.com/questions/21322564/numpy-list-of-1d-arrays-to-2d-array
    #mat_train = mat_train[:,:,1] #https://stackoverflow.com/questions/37152031/numpy-remove-a-dimension-from-np-array
    #mat_eval = np.array(modelToEval[corpus_eval])
    mat_eval = np.array(modelToEval.get_document_topics(corpus_eval,minimum_probability=0.0))
    mat_eval = mat_eval[:,:,1]
    mat_sim_v_tr = cosine_similarity(mat_eval,mat_train) # a matrix (n_valid,n_train)
    
    y_true = np.asarray(evalY)    
    for i in range(len(mat_sim_v_tr)):
        doc_ind_list = get_doc_ind_from_vec(mat_sim_v_tr[i],k_num_doc=k_num_doc)
        #print(doc_ind_list)
        label_predicted = get_labels_from_docs(doc_ind_list,trainY)
        if rn_dict.get(i) == 1:
            doc = 'doc: ' + ' '.join(display_results(evalX[i],vocabulary_index2word))
            pred = 'prediction-lda: ' + ' '.join(display_results(label_predicted,vocabulary_index2word_label))
            get_indexes = lambda x, xs: [i for (y, i) in zip(xs, range(len(xs))) if x == y]
            label = 'labels: ' + ' '.join(display_results(get_indexes(1,evalY[i]),vocabulary_index2word_label))
            prediction_str = prediction_str + '\n' + doc + '\n' + pred + '\n' + label + '\n'
    
    return prediction_str 
Example 24
Project: nonogram-solver   Author: mprat   File: solver.py    MIT License
def solve(
        nonogram, starter=None, add_puzzle_constraints=False):
    """
    Given a nonogram, determine whether it is solvable or not.
    If it is not solvable and add_puzzle_constraints is True,
    solve the puzzle while adding puzzle constraints to
    the given nonogram.

    Args:
        nonogram (nonogram_solver.nonogram.Nonogram): a nonogram
            represented in the nonogram_solver Nonogram type
        starter (int, optional): number of "starter" squares to seed
            the puzzle with before trying to solve it
        add_puzzle_constraints (bool, optional): if True and the puzzle
            is not solvable, add given squares to the nonogram to
            make it solvable.
    """

    nonogram_solver = NonogramSolver(nonogram)

    if starter is not None:
        for i in xrange(starter):
            nonogram_solver._pick_help_square()

    nonogram_solver._generate_solutions()

    if add_puzzle_constraints:
        while not nonogram_solver._puzzle_is_solved():
            # pick a random filled in square
            # generate solutions until the puzzle is solved
            nonogram_solver._pick_help_square()

            nonogram_solver._generate_solutions()

    return nonogram_solver._puzzle_is_solved(), nonogram_solver 
Example 25
Project: kuaa   Author: rafaelwerneck   File: subset.py    GNU General Public License v3.0
def main(argv=sys.argv):
	dataset, subset_size, method, subset_file, rest_file = process_options(argv)
	#uncomment the following line to fix the random seed 
	#random.seed(0)
	selected_lines = []

	if method == 0:
		selected_lines = stratified_selection(dataset, subset_size)
	elif method == 1:
		selected_lines = random_selection(dataset, subset_size)

	#select instances based on selected_lines
	dataset = open(dataset,'r')
	prev_selected_linenum = -1
	for i in xrange(len(selected_lines)):
		for cnt in xrange(selected_lines[i]-prev_selected_linenum-1):
			line = dataset.readline()
			if rest_file: 
				rest_file.write(line)
		subset_file.write(dataset.readline())
		prev_selected_linenum = selected_lines[i]
	subset_file.close()

	if rest_file:
		for line in dataset: 
			rest_file.write(line)
		rest_file.close()
	dataset.close() 
Example 26
Project: Electrolyte_Analysis_FTIR   Author: Samuel-Buteau   File: Constant_run.py    MIT License
def add_arguments(self, parser):
        # Positional arguments
        #parser.add_argument('poll_id', nargs='+', type=int)
        parser.add_argument('--mode', choices=['train_on_all_data',
                                               'cross_validation',
                                               'run_on_directory',
                                               'paper_figures'
                                               ])
        parser.add_argument('--logdir', required=True)
        parser.add_argument('--cross_validation_dir')
        parser.add_argument('--batch_size', type=int, default=32)
        parser.add_argument('--virtual_batches', type=int, default=2)
        parser.add_argument('--learning_rate', type=float, default=5e-3)
        parser.add_argument('--visuals', type=bool, default=False)
        parser.add_argument('--prob_supervised', type=float, default=0.9)
        parser.add_argument('--total_steps', type=int, default=30000)
        parser.add_argument('--checkpoint_every', type=int, default=2000)
        parser.add_argument('--log_every', type=int, default=2000)
        parser.add_argument('--dropout', type=float, default=0.05)
        parser.add_argument('--test_ratios', type=float, default=0.9)
        parser.add_argument('--noise_level', type=float, default=0.001)
        parser.add_argument('--largest_temp_exp', type=float, default=-1.)

        parser.add_argument('--prediction_coeff', type=float, default=5.)
        parser.add_argument('--normalization_coeff', type=float, default=1.)
        parser.add_argument('--positivity_coeff', type=float, default=1.)
        parser.add_argument('--small_x_coeff', type=float, default=.1)
        parser.add_argument('--global_norm_clip', type=float, default=10.)
        parser.add_argument('--seed', type=int, default=0)
        parser.add_argument('--datasets_file', default='compiled_datasets.file')
        parser.add_argument('--input_dir', default='InputData')
        parser.add_argument('--output_dir', default='OutputData') 
Example 27
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: main-tf-audio.py    MIT License
def fixSeed(args):
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        torch.cuda.manual_seed_all(args.manualSeed)

# Use CUDA 
Example 28
Project: Kaggle-Statoil-Challenge   Author: adodd202   File: utils.py    MIT License
def fixSeed(args):
    random.seed(args.manualSeed)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        torch.cuda.manual_seed_all(args.manualSeed) 
Example 29
Project: sandsifter   Author: Battelle   File: mutator.py    BSD 3-Clause "New" or "Revised" License
def rand_byte():
    return chr(random.randint(0,255))

# generate an approximate seed instruction
# it is probably fine to just randomize the whole thing 
Example 30
Project: sandsifter   Author: Battelle   File: mutator.py    BSD 3-Clause "New" or "Revised" License
def init_mutator():
    random.seed()
    for i in range(1, SEEDS):
        s = insn()
        s.raw = generate_seed()
        q.append(s) 
Example 31
Project: subword-qac   Author: clovaai   File: generate.py    MIT License
def main(args):
    logger.info(f"Args: {json.dumps(args.__dict__, indent=2, sort_keys=True)}")

    spm_path = os.path.join('spm', args.spm, "spm.model")
    logger.info(f"Loading tokenizer from {spm_path}")
    tokenizer = Tokenizer(spm_path)
    args.ntoken = ntoken = len(tokenizer)
    args.branching_factor = min([args.branching_factor, args.ntoken])
    logger.info(f"  Vocab size: {ntoken}")

    n_queries_str = f"{f'only {args.n_queries} samples' if args.n_queries else 'all'} quries from"
    logger.info(f"Reading a dataset ({n_queries_str} test.query.txt)")
    seen_set = set(read_data(os.path.join(args.data_dir, "train.query.txt"), min_len=args.min_len))
    test_data = read_data(os.path.join(args.data_dir, "test.query.txt"), min_len=args.min_len)
    if args.n_queries:
        random.seed(args.seed)
        test_data = random.sample(test_data, args.n_queries)
    n_seen_test_data = len([x for x in test_data if x in seen_set])
    n_unseen_test_data = len(test_data) - n_seen_test_data
    logger.info(f"  Number of test data: {len(test_data):8d} (seen {n_seen_test_data}, unseen {n_unseen_test_data})")

    logger.info(f"Loading model from {args.model_dir}")
    model = model_load(args.model_dir)
    model = model.to(device)

    logger.info('Generation starts!')
    with torch.no_grad():
        generate(model, tokenizer, test_data, args, seen_set=seen_set, calc_mrl=args.calc_mrl) 
Example 32
Project: neural-fingerprinting   Author: StephanZheng   File: validate_and_copy_submissions.py    BSD 3-Clause "New" or "Revised" License
def main(args):
  random.seed()
  temp_dir = tempfile.mkdtemp()
  logging.info('Created temporary directory: %s', temp_dir)
  validator = SubmissionValidator(
      source_dir=args.source_dir,
      target_dir=args.target_dir,
      temp_dir=temp_dir,
      do_copy=args.copy,
      use_gpu=args.use_gpu,
      containers_file=args.containers_file)
  validator.run()
  logging.info('Deleting temporary directory: %s', temp_dir)
  subprocess.call(['rm', '-rf', temp_dir]) 
Example 33
Project: neural-fingerprinting   Author: StephanZheng   File: worker.py    BSD 3-Clause "New" or "Revised" License
def main(args):
  """Main function which runs worker."""
  title = '## Starting evaluation of round {0} ##'.format(args.round_name)
  logging.info('\n'
               + '#' * len(title) + '\n'
               + '#' * len(title) + '\n'
               + '##' + ' ' * (len(title)-2) + '##' + '\n'
               + title + '\n'
               + '#' * len(title) + '\n'
               + '#' * len(title) + '\n'
               + '##' + ' ' * (len(title)-2) + '##' + '\n')
  if args.blacklisted_submissions:
    logging.warning('BLACKLISTED SUBMISSIONS: %s', args.blacklisted_submissions)
  random.seed()
  logging.info('Running nvidia-docker to ensure that GPU works')
  shell_call(['docker', 'run', '--runtime=nvidia',
              '--rm', 'nvidia/cuda', 'nvidia-smi'])
  eval_worker = EvaluationWorker(
      worker_id=args.worker_id,
      storage_client=eval_lib.CompetitionStorageClient(
          args.project_id, args.storage_bucket),
      datastore_client=eval_lib.CompetitionDatastoreClient(
          args.project_id, args.round_name),
      storage_bucket=args.storage_bucket,
      round_name=args.round_name,
      dataset_name=args.dataset_name,
      blacklisted_submissions=args.blacklisted_submissions,
      num_defense_shards=args.num_defense_shards)
  eval_worker.run_work() 
Example 34
Project: CLRS   Author: JasonVann   File: CLRS.py    MIT License
def stress_test_prep(n, lo, hi):
    data = []
    random.seed(1)
    
    for i in range(int(n)):
        a = random.randint(lo, hi)
        #b = random.randint(a, hi)
        #temp = (a, b)
        #print temp
        data.append(a)
    return data 
Example 35
Project: Image-Caption-Generator   Author: dabasajay   File: load_data.py    MIT License
def data_generator(images, captions, tokenizer, max_length, batch_size, random_seed):
	# Setting random seed for reproducibility of results
	random.seed(random_seed)
	# Image ids
	image_ids = list(captions.keys())
	_count=0
	assert batch_size<= len(image_ids), 'Batch size must be less than or equal to {}'.format(len(image_ids))
	while True:
		if _count >= len(image_ids):
			# Generator exceeded or reached the end so restart it
			_count = 0
		# Batch list to store data
		input_img_batch, input_sequence_batch, output_word_batch = list(), list(), list()
		for i in range(_count, min(len(image_ids), _count+batch_size)):
			# Retrieve the image id
			image_id = image_ids[i]
			# Retrieve the image features
			image = images[image_id][0]
			# Retrieve the captions list
			captions_list = captions[image_id]
			# Shuffle captions list
			random.shuffle(captions_list)
			input_img, input_sequence, output_word = create_sequences(tokenizer, max_length, captions_list, image)
			# Add to batch
			for j in range(len(input_img)):
				input_img_batch.append(input_img[j])
				input_sequence_batch.append(input_sequence[j])
				output_word_batch.append(output_word[j])
		_count = _count + batch_size
		yield [[np.array(input_img_batch), np.array(input_sequence_batch)], np.array(output_word_batch)] 
Example 36
Project: flasky   Author: RoseOu   File: util.py    MIT License
def seed():
    try:
        random.seed(os.urandom(64))
    except NotImplementedError:
        random.seed('%s.%s' % (time.time(), os.getpid())) 
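Since Python 2.4, random.seed() with no argument already tries os.urandom on its own and falls back to a time-based seed, so this wrapper mainly makes the fallback explicit and mixes the process id into it. A quick check of the built-in behavior:

import random

random.seed()            # seeds from os.urandom when available
print(random.random())   # differs on each run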
Example 37
Project: mmi-tagger   Author: karlstratos   File: main.py    MIT License
def main(args):
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    device = torch.device('cuda' if args.cuda else 'cpu')
    data = Data(args.data)
    model = MMIModel(len(data.w2i), len(data.c2i), args.num_labels, args.dim,
                     args.dim // 2, args.width, 1).to(device)
    logger = Logger(args.model + '.log', args.train)
    logger.log('python ' + ' '.join(sys.argv) + '\n')
    logger.log('Random seed: %d' % args.seed)
    control = Control(model, args.model, args.batch_size, device, logger)

    if args.train:
        acc, vm, zseqs, clustering = control.train(data, args.lr, args.epochs)

    elif os.path.exists(args.model):
        control.load_model()
        acc, vm, zseqs, clustering = control.evaluate(data)
        print('     acc: {:5.2f}'.format(acc))
        print('      vm: {:5.2f}'.format(vm))

    if args.pred:
        with open(args.pred, 'w') as f:
            for zseq in zseqs:
                f.write(' '.join([str(z) for z in zseq]) + '\n')

    if args.clusters:
        with open(args.clusters, 'w') as f:
            for z, cluster in enumerate(clustering):
                f.write(str(z) + '\t' + ' '.join([data.i2w[i] for i in
                                                  cluster]) + '\n') 
Example 38
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_post(self, client):
        random.seed(0)
        resp = await (await client).post('/')
        assert resp.status == 201
        assert await resp.json() == {'job_hash': 'e3e70682c2094cac629f6fbed82c07cd'} 
Example 39
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_success(self, client):
        random.seed(0)
        client = await client
        await client.post('/')

        query = {'job_hash': 'e3e70682c2094cac629f6fbed82c07cd'}
        while True:
            resp = await client.get('/', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == 'test'
        assert (await resp.json())['status'] == 'done' 
Example 40
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_last(self, client):
        random.seed(0)
        client = await client
        await client.post('/')

        query = {'job_hash': 'last'}
        while True:
            resp = await client.get('/', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == 'test'
        assert (await resp.json())['status'] == 'done' 
Example 41
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_all(self, client):
        random.seed(0)
        client = await client
        await client.post('/')

        while True:
            resp = await client.get('/')
            if 'running' not in (await resp.json()):
                break

        assert resp.status == 200
        assert (await resp.json())['done']['e3e70682c2094cac629f6fbed82c07cd']['result'] == 'test' 
Example 42
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_success(self, client):
        random.seed(0)
        client = await client
        await client.post('/sync')

        query = {'job_hash': 'e3e70682c2094cac629f6fbed82c07cd'}
        while True:
            resp = await client.get('/sync', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == 'test'
        assert (await resp.json())['status'] == 'done' 
Example 43
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_last(self, client):
        random.seed(0)
        client = await client
        await client.post('/sync')

        query = {'job_hash': 'last'}
        while True:
            resp = await client.get('/sync', params=query)
            if (await resp.json())['status'] != 'running':
                break

        assert resp.status == 200
        assert (await resp.json())['result'] == 'test'
        assert (await resp.json())['status'] == 'done' 
Example 44
Project: swaggerit   Author: dutradda   File: test_jobs_integration.py    MIT License
async def test_get_all(self, client):
        random.seed(0)
        client = await client
        await client.post('/sync')

        while True:
            resp = await client.get('/sync')
            if 'running' not in (await resp.json()):
                break

        assert resp.status == 200
        assert (await resp.json())['done']['e3e70682c2094cac629f6fbed82c07cd']['result'] == 'test' 
Example 45
Project: PO_2_MLSA   Author: jvollme   File: PO_2_MLSA.py    GNU General Public License v3.0
def randomnumber():
	mylogger.debug("randomnumber()")
	#returns a random integer to use as seed for rxml and pyml
	random.seed()
	
	return random.randint(1, 2000) 
Example 46
Project: PO_2_MLSA   Author: jvollme   File: PO_2_MLSA_with_partials.py    GNU General Public License v3.0
def randomnumber():
	mylogger.debug("randomnumber()")
	#returns a random integer to use as seed for rxml and pyml
	random.seed()
	
	return random.randint(1, 2000) 
Example 47
Project: synthetic-data-tutorial   Author: theodi   File: utils.py    MIT License
def set_random_seed(seed=0):
    random.seed(seed)
    np.random.seed(seed) 
Example 48
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: Preprocessing.py    Apache License 2.0
def local_split(train_index):
   random.seed(0)
   train_index = set(train_index)
   all_index = sorted(train_index)
   num_test = int(len(all_index) / 3)
   random.shuffle(all_index)
   train_set = set(all_index[num_test:])
   test_set = set(all_index[:num_test])
   return train_set, test_set 
Example 49
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: common.py    Apache License 2.0
def default_logger():
    """A logger used to output seed information to nosetests logs."""
    logger = logging.getLogger(__name__)
    # getLogger() lookups will return the same logger, but only add the handler once.
    if not len(logger.handlers):
        handler = logging.StreamHandler(sys.stderr)
        handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
        logger.addHandler(handler)
        if (logger.getEffectiveLevel() == logging.NOTSET):
            logger.setLevel(logging.INFO)
    return logger 
Example 50
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: common.py    Apache License 2.0
def random_seed(seed=None):
    """
    Runs a code block with a new seed for np, mx and python's random.

    Parameters
    ----------

    seed : the seed to pass to np.random, mx.random and python's random.

    To impose rng determinism, invoke e.g. as in:

    with random_seed(1234):
        ...

    To impose rng non-determinism, invoke as in:

    with random_seed():
        ...

    Upon conclusion of the block, the RNGs are returned to
    a state that is a function of their pre-block state, so
    any prior non-determinism is preserved.

    """

    try:
        next_seed = np.random.randint(0, np.iinfo(np.int32).max)
        if seed is None:
            np.random.seed()
            seed = np.random.randint(0, np.iinfo(np.int32).max)
        logger = default_logger()
        logger.debug('Setting np, mx and python random seeds = %s', seed)
        np.random.seed(seed)
        mx.random.seed(seed)
        random.seed(seed)
        yield
    finally:
        # Reinstate prior state of np.random and other generators
        np.random.seed(next_seed)
        mx.random.seed(next_seed)
        random.seed(next_seed)
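The yield in the middle indicates this is presumably decorated with @contextlib.contextmanager in the original file; under that assumption, a usage sketch:

# Deterministic block: np, mx and python's random all get seed 1234,
# and prior generator state is mixed forward afterwards.
with random_seed(1234):
    sample = np.random.rand(3)

# Non-deterministic block: a fresh seed is drawn and logged.
with random_seed():
    noise = np.random.rand(3)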