Python random.seed() Examples

The following are 30 code examples of random.seed(), collected from open-source projects. The source file, project, and license are noted above each example. You may also want to check out the other available functions and classes of the random module.
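As a quick refresher before the project examples: random.seed() fixes the state of Python's global pseudo-random generator, so the same seed always yields the same sequence of draws. A minimal sketch using only the standard library:

import random

random.seed(42)
first = [random.random() for _ in range(3)]

random.seed(42)  # re-seeding restarts the generator from the same state
second = [random.random() for _ in range(3)]

assert first == second  # identical seeds produce identical sequences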
Example #1
Source File: train.py    From mmdetection with Apache License 2.0
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False 
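A minimal usage sketch for this helper (assuming the module-level imports of random, numpy, and torch that the project provides):

set_random_seed(42)                      # seeds python, numpy, and torch RNGs
set_random_seed(42, deterministic=True)  # additionally forces deterministic cuDNN kernels, often at a speed cost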
Example #2
Source File: demo_sampler_wrapper.py    From robosuite with MIT License
def sample(self):
        """
        This is the core sampling method. Samples a state from a
        demonstration, in accordance with the configuration.
        """

        # chooses a sampling scheme randomly based on the mixing ratios
        seed = random.uniform(0, 1)
        ratio = np.cumsum(self.scheme_ratios)
        ratio = ratio > seed
        for i, v in enumerate(ratio):
            if v:
                break

        sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
        return sample_method() 
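The loop above performs a weighted categorical draw: a uniform variate is compared against the cumulative mixing ratios, and the first bucket whose cumulative mass exceeds it is selected. The same choice can be made without the loop via numpy's searchsorted; the sketch below uses made-up ratios and is not part of the robosuite code:

import random
import numpy as np

scheme_ratios = [0.5, 0.3, 0.2]  # hypothetical mixing ratios summing to 1
u = random.uniform(0, 1)
# side='right' mirrors the strict `ratio > seed` comparison in the loop
i = int(np.searchsorted(np.cumsum(scheme_ratios), u, side='right'))
i = min(i, len(scheme_ratios) - 1)  # guard against float round-off at the top edge
# i now indexes the scheme chosen with probability scheme_ratios[i]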
Example #3
Source File: validate_submission.py    From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License
def main(args):
  print_in_box('Validating submission ' + args.submission_filename)
  random.seed()
  temp_dir = args.temp_dir
  delete_temp_dir = False
  if not temp_dir:
    temp_dir = tempfile.mkdtemp()
    logging.info('Created temporary directory: %s', temp_dir)
    delete_temp_dir = True
  validator = validate_submission_lib.SubmissionValidator(temp_dir,
                                                          args.use_gpu)
  if validator.validate_submission(args.submission_filename,
                                   args.submission_type):
    print_in_box('Submission is VALID!')
  else:
    print_in_box('Submission is INVALID, see log messages for details')
  if delete_temp_dir:
    logging.info('Deleting temporary directory: %s', temp_dir)
    subprocess.call(['rm', '-rf', temp_dir]) 
Example #4
Source File: utils.py    From tpu_pretrain with Apache License 2.0
def init(args):
    # init logger
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is not None and args.log_file != "":
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file, filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    else:
        logging.basicConfig(level=logging.INFO, format=log_format)

    # create output dir
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    assert 'bert' in args.output_dir.name, \
        '''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''

    args.output_dir.mkdir(parents=True, exist_ok=True)

    # set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed) 
Example #5
Source File: strainsimulationwrapper.py    From CAMISIM with Apache License 2.0
def _get_simulate_cmd(self, directory_strains, filepath_genome, filepath_gff):
		"""
		Get system command to start simulation. Change directory to the strain directory and start simulating strains.

		@param directory_strains: Directory for the simulated strains
		@type directory_strains: str | unicode
		@param filepath_genome: Genome to get simulated strains of
		@type filepath_genome: str | unicode
		@param filepath_gff: gff file with gene annotations
		@type filepath_gff: str | unicode

		@return: System command line
		@rtype: str
		"""
		cmd_run_simujobrun = "cd {dir}; {executable} {filepath_genome} {filepath_gff} {seed}" + " >> {log}"
		cmd = cmd_run_simujobrun.format(
			dir=directory_strains,
			executable=self._executable_sim,
			filepath_genome=filepath_genome,
			filepath_gff=filepath_gff,
			seed=self._get_seed(),
			log=os.path.join(directory_strains, os.path.basename(filepath_genome) + ".sim.log")
		)
		return cmd 
Example #6
Source File: fastaanonymizer.py    From CAMISIM with Apache License 2.0
def __init__(self, logfile=None, verbose=True, debug=False, seed=None, tmp_dir=None):
		"""
			Anonymize fasta sequences

			@attention: 'shuf' is used which loads everything into memory!

			@param logfile: file handler or file path to a log file
			@type logfile: file | io.FileIO | StringIO.StringIO | str | unicode
			@param verbose: Not verbose means that only warnings and errors will be passed to the stream
			@type verbose: bool
			@param debug: more output and files are kept, manual clean up required
			@type debug: bool
			@param seed: The seed written to the random_source file used by the 'shuf' command
			@type seed: long | int | float | str | unicode
			@param tmp_dir: directory for temporary files, like the random_source file for 'shuf'
			@type tmp_dir: str | unicode

			@return: None
			@rtype: None
		"""
		assert isinstance(verbose, bool)
		assert isinstance(debug, bool)
		assert seed is None or isinstance(seed, (long, int, float, basestring))
		assert tmp_dir is None or isinstance(tmp_dir, basestring)
		if tmp_dir is not None:
			assert self.validate_dir(tmp_dir)
		else:
			tmp_dir = tempfile.gettempdir()
		self._tmp_dir = tmp_dir
		super(FastaAnonymizer, self).__init__(logfile, verbose, debug, label="FastaAnonymizer")

		if seed is not None:
			random.seed(seed)

		script_dir = os.path.dirname(self.get_full_path(__file__))
		self._anonymizer = os.path.join(script_dir, "anonymizer.py")
		self._fastastreamer = os.path.join(script_dir, "fastastreamer.py")
		assert self.validate_file(self._anonymizer)
		assert self.validate_file(self._fastastreamer) 
Example #7
Source File: estimator_utils.py    From EDeN with MIT License
def make_train_test_sets(pos_graphs, neg_graphs,
                         test_proportion=.3, random_state=2):
    """make_train_test_sets."""
    random.seed(random_state)
    random.shuffle(pos_graphs)
    random.shuffle(neg_graphs)
    pos_dim = len(pos_graphs)
    neg_dim = len(neg_graphs)
    tr_pos_graphs = pos_graphs[:-int(pos_dim * test_proportion)]
    te_pos_graphs = pos_graphs[-int(pos_dim * test_proportion):]
    tr_neg_graphs = neg_graphs[:-int(neg_dim * test_proportion)]
    te_neg_graphs = neg_graphs[-int(neg_dim * test_proportion):]
    tr_graphs = tr_pos_graphs + tr_neg_graphs
    te_graphs = te_pos_graphs + te_neg_graphs
    tr_targets = [1] * len(tr_pos_graphs) + [0] * len(tr_neg_graphs)
    te_targets = [1] * len(te_pos_graphs) + [0] * len(te_neg_graphs)
    tr_graphs, tr_targets = paired_shuffle(tr_graphs, tr_targets)
    te_graphs, te_targets = paired_shuffle(te_graphs, te_targets)
    return (tr_graphs, np.array(tr_targets)), (te_graphs, np.array(te_targets)) 
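The shuffle-and-slice pattern used here, shown in isolation on plain lists (a generic sketch, not the EDeN API; paired_shuffle is project-internal):

import random

items = list(range(100))
random.seed(2)                  # fixed random_state makes the split reproducible
random.shuffle(items)
n_test = int(len(items) * 0.3)  # test_proportion of .3
train, test = items[:-n_test], items[-n_test:]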
Example #8
Source File: run_dqn_lander.py    From cs294-112_hws with MIT License
def lander_learn(env,
                 session,
                 num_timesteps,
                 seed):

    optimizer = lander_optimizer()
    stopping_criterion = lander_stopping_criterion(num_timesteps)
    exploration_schedule = lander_exploration_schedule(num_timesteps)

    dqn.learn(
        env=env,
        session=session,
        exploration=exploration_schedule,
        stopping_criterion=stopping_criterion,
        double_q=True,
        **lander_kwargs()
    )
    env.close() 
Example #9
Source File: populationdistribution.py    From CAMISIM with Apache License 2.0
def __init__(self, logfile=None, verbose=True, debug=False, seed=None):
		"""
			Initialize instance with seed

			@attention:

			@param logfile: file handler or file path to a log file
			@type logfile: basestring | file | io.FileIO | StringIO.StringIO
			@param verbose: Not verbose means that only warnings and errors will be passed to the stream
			@type verbose: bool
			@param debug: If True logger will output DEBUG messages
			@type debug: bool
			@param seed: The seed used to initialize the 'random' module
			@type seed: long | int | float | str | unicode

			@return: None
			@rtype: None
		"""
		assert isinstance(verbose, bool)
		assert isinstance(debug, bool)
		super(PopulationDistribution, self).__init__(logfile, verbose, debug)

		if seed is not None:
			random.seed(seed) 
Example #10
Source File: trainer_lib.py    From fine-lm with MIT License
def set_random_seed(seed):
  tf.set_random_seed(seed)
  random.seed(seed)
  np.random.seed(seed) 
Example #11
Source File: subject_verb_agreement.py    From fine-lm with MIT License
def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
  """Loads exampls from the tsv file.

  Args:
    tmp_dir: temp directory.
    prop_train: proportion of the train data
    prop_val: proportion of the validation data

  Returns:
    All examples in the dataset plus the train, validation, and test splits.

  """

  infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
  tf.logging.info('Loading examples')

  all_examples = []
  for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
    if i % 100000 == 0:
      tf.logging.info('%d examples have been loaded....' % i)
    ex = {x: int(y) if y.isdigit() else y for x, y in d.items()}
    all_examples.append(ex)

  random.seed(1)
  random.shuffle(all_examples)
  n_train = int(len(all_examples) * prop_train)
  n_val = n_train + int(len(all_examples) * prop_val)
  train = all_examples[:n_train]
  val = all_examples[n_train:n_val]
  test = []
  for e in all_examples[n_val:]:
    if e['n_intervening'] == e['n_diff_intervening']:
      test.append(e)

  return all_examples, train, val, test 
Example #12
Source File: t2t_datagen.py    From fine-lm with MIT License
def set_random_seed():
  """Set the random seed from flag everywhere."""
  tf.set_random_seed(FLAGS.random_seed)
  random.seed(FLAGS.random_seed)
  np.random.seed(FLAGS.random_seed) 
Example #13
Source File: misc_util.py    From lirpg with MIT License
def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i) 
Example #14
Source File: get_references_web_single_group.py    From fine-lm with MIT License
def get_urls_for_shard_group(urls_dir, shard_id, group_id):
  shard_urls = get_urls_for_shard(urls_dir, shard_id)

  # Deterministic sort and shuffle to prepare for sharding
  shard_urls.sort()
  random.seed(123)
  random.shuffle(shard_urls)
  groups = shard(shard_urls, int(math.ceil(len(shard_urls) / URLS_PER_CLIENT)))
  group_urls = groups[group_id]
  if FLAGS.debug_num_urls:
    group_urls = group_urls[:FLAGS.debug_num_urls]
  return group_urls 
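The sort before the seeded shuffle is what makes this sharding deterministic across workers: every process starts from the same canonical ordering, so random.seed(123) produces the identical permutation everywhere, and each group_id selects the same URLs. A generic sketch of the idea (not the t2t helper itself):

import random

def deterministic_groups(urls, num_groups, seed=123):
    urls = sorted(urls)    # canonical order first, so input order does not matter
    random.seed(seed)
    random.shuffle(urls)   # identical permutation on every worker
    return [urls[i::num_groups] for i in range(num_groups)]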
Example #15
Source File: text_encoder_test.py    From fine-lm with MIT License
def test_long_tokens(self):
    """Subword tokenization should still run efficiently with long tokens.

    To make it run efficiently, we need to use the `max_subtoken_length`
    argument when calling SubwordTextEncoder.build_to_target_size.
    """
    token_length = 4000
    num_tokens = 50
    target_vocab_size = 600
    max_subtoken_length = 10  # Set this to `None` to get problems.
    max_count = 500

    # Generate some long random strings.
    random.seed(0)
    long_tokens = []
    for _ in range(num_tokens):
      long_token = "".join([random.choice(string.ascii_uppercase)
                            for _ in range(token_length)])
      long_tokens.append(long_token)

    corpus = " ".join(long_tokens)
    token_counts = collections.Counter(corpus.split(" "))
    alphabet = set(corpus) - {" "}

    encoder = text_encoder.SubwordTextEncoder.build_to_target_size(
        target_vocab_size, token_counts, 1, max_count, num_iterations=1,
        max_subtoken_length=max_subtoken_length)

    # All vocabulary elements are in the alphabet and subtoken strings even
    # if we requested a smaller vocabulary to assure all expected strings
    # are encodable.
    self.assertTrue(alphabet.issubset(encoder._alphabet))
    for a in alphabet:
      self.assertIn(a, encoder.all_subtoken_strings) 
Example #16
Source File: thisplace.py    From ThisPlace with MIT License
def get_words(fname):
    lines = open(fname)
    words = []
    for word in lines:
        words.append(word.strip())

    lines.close()
    random.seed(634634)
    random.shuffle(words)
    words = words[:2**15]
    assert len(words) == len(set(words))
    return words

# These read like alien races from a sci-fi book 
Example #17
Source File: loader_factory.py    From Pytorch-Networks with MIT License
def test_():
    import matplotlib.pyplot as plt
    import random

    random.seed(0)
    torch.manual_seed(0)

    train_loader = get_loader(
        "cub200_2011",
        ['/home/ikenaga/Public/CUB_200_2011/images.txt',
         '/home/ikenaga/Public/CUB_200_2011/train_test_split.txt',
         '/home/ikenaga/Public/CUB_200_2011/images/'],
        'self_test',
        '/home/ikenaga/Public/CUB_200_2011/image_class_labels.txt',
        224)
    for i, (img_ori_tensor, label) in enumerate(train_loader):
        # img_ori_tensor_0 = img_ori_tensor[0].numpy().astype(np.uint8)
        # img_ori_tensor_1 = img_ori_tensor[1].numpy().astype(np.uint8)
        img_ori_tensor_0 = inverse_preprocess(img_ori_tensor[0])
        img_ori_tensor_1 = inverse_preprocess(img_ori_tensor[1])

        fig = plt.figure()
        a = fig.add_subplot(1,2,1)
        a.set_title('img_ori_tensor_0%d'%label[0].numpy())
        plt.imshow(img_ori_tensor_0)
        a = fig.add_subplot(1,2,2)
        a.set_title('img_ori_tensor_1%d'%label[1].numpy())
        plt.imshow(img_ori_tensor_1)

        plt.show() 
Example #18
Source File: run_dqn_lander.py    From cs294-112_hws with MIT License
def main():
    # Run training
    seed = 4565 # you may want to randomize this
    print('random seed = %d' % seed)
    env = get_env(seed)
    session = get_session()
    set_global_seeds(seed)
    lander_learn(env, session, num_timesteps=500000, seed=seed) 
Example #19
Source File: run_dqn_atari.py    From cs294-112_hws with MIT License
def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i) 
Example #20
Source File: utils.py    From pruning_yolov3 with GNU General Public License v3.0
def init_seeds(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed) 
Example #21
Source File: __init__.py    From OpenNRE with MIT License
def fix_seed(seed=12345):
    import torch
    import numpy as np
    import random 
    torch.manual_seed(seed) # cpu
    torch.cuda.manual_seed(seed) # gpu
    np.random.seed(seed) # numpy
    random.seed(seed) # random and transforms
    torch.backends.cudnn.deterministic=True # cudnn 
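Note that this sets cudnn.deterministic but leaves torch.backends.cudnn.benchmark at its default; for the stricter setup shown in Example #1, a caller could also add the following line (a caller-side addition, not part of the OpenNRE snippet):

torch.backends.cudnn.benchmark = False  # disable autotuning, which can vary kernel selection between runs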
Example #22
Source File: create_pet_tf_record.py    From DOTA_models with Apache License 2.0
def main(_):
  data_dir = FLAGS.data_dir
  label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)

  logging.info('Reading from Pet dataset.')
  image_dir = os.path.join(data_dir, 'images')
  annotations_dir = os.path.join(data_dir, 'annotations')
  examples_path = os.path.join(annotations_dir, 'trainval.txt')
  examples_list = dataset_util.read_examples_list(examples_path)

  # Test images are not included in the downloaded data set, so we shall perform
  # our own split.
  random.seed(42)
  random.shuffle(examples_list)
  num_examples = len(examples_list)
  num_train = int(0.7 * num_examples)
  train_examples = examples_list[:num_train]
  val_examples = examples_list[num_train:]
  logging.info('%d training and %d validation examples.',
               len(train_examples), len(val_examples))

  train_output_path = os.path.join(FLAGS.output_dir, 'pet_train.record')
  val_output_path = os.path.join(FLAGS.output_dir, 'pet_val.record')
  create_tf_record(train_output_path, label_map_dict, annotations_dir,
                   image_dir, train_examples)
  create_tf_record(val_output_path, label_map_dict, annotations_dir,
                   image_dir, val_examples) 
Example #23
Source File: document_generators.py    From DOTA_models with Apache License 2.0
def documents(dataset='train',
              include_unlabeled=False,
              include_validation=False):
  """Generates Documents based on FLAGS.dataset.

  Args:
    dataset: str, identifies folder within IMDB data directory, test or train.
    include_unlabeled: bool, whether to include the unsup directory. Only valid
      when dataset=train.
    include_validation: bool, whether to include validation data.

  Yields:
    Document

  Raises:
    ValueError: if include_unlabeled is true but dataset is not 'train'
  """

  if include_unlabeled and dataset != 'train':
    raise ValueError('If include_unlabeled=True, must use train dataset')

  # Set the random seed so that we have the same validation set when running
  # gen_data and gen_vocab.
  random.seed(302)

  ds = FLAGS.dataset
  if ds == 'imdb':
    docs_gen = imdb_documents
  elif ds == 'dbpedia':
    docs_gen = dbpedia_documents
  elif ds == 'rcv1':
    docs_gen = rcv1_documents
  elif ds == 'rt':
    docs_gen = rt_documents
  else:
    raise ValueError('Unrecognized dataset %s' % FLAGS.dataset)

  for doc in docs_gen(dataset, include_unlabeled, include_validation):
    yield doc 
Example #24
Source File: download_and_convert_flowers.py    From DOTA_models with Apache License 2.0
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train and test:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]

  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Flowers dataset!') 
Example #25
Source File: kmeans.py    From discomll with Apache License 2.0
def map_init(interface, params):
    """Intialize random number generator with given seed `params.seed`."""
    import random
    random.seed(params['seed'])
    return params 
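A caveat shared by this and the following discomll initializers: every mapper seeded with the same params['seed'] draws an identical random stream. When independent per-worker streams are desired, a common remedy is to mix a worker index into the seed. A hypothetical variant (not what discomll does):

import random

def map_init_distinct(interface, params, worker_id=0):
    # hypothetical: offset the shared seed so each worker gets its own stream
    random.seed(params['seed'] + worker_id)
    return params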
Example #26
Source File: distributed_weighted_forest_rand.py    From discomll with Apache License 2.0
def map_init(interface, params):
    """Intialize random number generator with given seed `params.seed`."""
    import numpy as np
    import random
    np.random.seed(params['seed'])
    random.seed(params['seed'])
    return params 
Example #27
Source File: distributed_weighted_forest.py    From discomll with Apache License 2.0
def map_init(interface, params):
    """Intialize random number generator with given seed `params.seed`."""
    import numpy as np
    import random
    np.random.seed(params['seed'])
    random.seed(params['seed'])

    return params 
Example #28
Source File: forest_distributed_decision_trees.py    From discomll with Apache License 2.0
def map_init(interface, params):
    """Intialize random number generator with given seed `params.seed`."""
    import random
    import numpy as np
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    return params 
Example #29
Source File: distributed_random_forest.py    From discomll with Apache License 2.0
def map_init(interface, params):
    """Intialize random number generator with given seed `params.seed`."""
    import random
    import numpy as np
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    return params 
Example #30
Source File: common.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
@contextmanager  # from contextlib; required for the `with random_seed(...):` usage below
def random_seed(seed=None):
    """
    Runs a code block with a new seed for np, mx and python's random.

    Parameters
    ----------

    seed : the seed to pass to np.random, mx.random and python's random.

    To impose rng determinism, invoke e.g. as in:

    with random_seed(1234):
        ...

    To impose rng non-determinism, invoke as in:

    with random_seed():
        ...

    Upon conclusion of the block, the rng's are returned to
    a state that is a function of their pre-block state, so
    any prior non-determinism is preserved.

    """

    try:
        next_seed = np.random.randint(0, np.iinfo(np.int32).max)
        if seed is None:
            np.random.seed()
            seed = np.random.randint(0, np.iinfo(np.int32).max)
        logger = default_logger()
        logger.debug('Setting np, mx and python random seeds = %s', seed)
        np.random.seed(seed)
        mx.random.seed(seed)
        random.seed(seed)
        yield
    finally:
        # Reinstate prior state of np.random and other generators
        np.random.seed(next_seed)
        mx.random.seed(next_seed)
        random.seed(next_seed)
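A usage sketch matching the docstring (assuming the module-level imports of numpy as np, mxnet as mx, random, and default_logger that the project provides):

with random_seed(1234):   # deterministic block: np, mx, and python RNGs all seeded with 1234
    x = random.random()

with random_seed():       # non-deterministic block: a fresh entropy-based seed is drawn
    y = random.random()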