Python random.seed() Examples

The following are 30 code examples of random.seed(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module random, or try the search function.
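
Before the per-project examples, here is a minimal sketch (not taken from any of the projects below) of what seeding the module-level generator does: re-seeding with the same value restores the same generator state, so the subsequent draws repeat exactly.

import random

random.seed(42)                                   # fix the generator state
first = [random.randint(0, 9) for _ in range(5)]

random.seed(42)                                   # same seed, same state
second = [random.randint(0, 9) for _ in range(5)]

assert first == second                            # identical sequences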

Example #1
Source File: train.py From mmdetection with Apache License 2.0 | 9 votes |
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
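
A hypothetical call site (illustrative only, not mmdetection's actual CLI wiring) showing how a helper like the one above is typically invoked once at startup, before models and data loaders are built:

# Hypothetical usage of set_random_seed(); the flag names are illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--deterministic', action='store_true')
cli = parser.parse_args()

set_random_seed(cli.seed, deterministic=cli.deterministic)  # seeds python, numpy and torch in one call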
Example #2
Source File: estimator_utils.py From EDeN with MIT License | 6 votes |
def make_train_test_sets(pos_graphs, neg_graphs,
                         test_proportion=.3, random_state=2):
    """make_train_test_sets."""
    random.seed(random_state)
    random.shuffle(pos_graphs)
    random.shuffle(neg_graphs)
    pos_dim = len(pos_graphs)
    neg_dim = len(neg_graphs)
    tr_pos_graphs = pos_graphs[:-int(pos_dim * test_proportion)]
    te_pos_graphs = pos_graphs[-int(pos_dim * test_proportion):]
    tr_neg_graphs = neg_graphs[:-int(neg_dim * test_proportion)]
    te_neg_graphs = neg_graphs[-int(neg_dim * test_proportion):]
    tr_graphs = tr_pos_graphs + tr_neg_graphs
    te_graphs = te_pos_graphs + te_neg_graphs
    tr_targets = [1] * len(tr_pos_graphs) + [0] * len(tr_neg_graphs)
    te_targets = [1] * len(te_pos_graphs) + [0] * len(te_neg_graphs)
    tr_graphs, tr_targets = paired_shuffle(tr_graphs, tr_targets)
    te_graphs, te_targets = paired_shuffle(te_graphs, te_targets)
    return (tr_graphs, np.array(tr_targets)), (te_graphs, np.array(te_targets))
Example #3
Source File: validate_submission.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 6 votes |
def main(args):
    print_in_box('Validating submission ' + args.submission_filename)
    random.seed()
    temp_dir = args.temp_dir
    delete_temp_dir = False
    if not temp_dir:
        temp_dir = tempfile.mkdtemp()
        logging.info('Created temporary directory: %s', temp_dir)
        delete_temp_dir = True
    validator = validate_submission_lib.SubmissionValidator(temp_dir, args.use_gpu)
    if validator.validate_submission(args.submission_filename, args.submission_type):
        print_in_box('Submission is VALID!')
    else:
        print_in_box('Submission is INVALID, see log messages for details')
    if delete_temp_dir:
        logging.info('Deleting temporary directory: %s', temp_dir)
        subprocess.call(['rm', '-rf', temp_dir])
Example #4
Source File: fastaanonymizer.py From CAMISIM with Apache License 2.0 | 6 votes |
def __init__(self, logfile=None, verbose=True, debug=False, seed=None, tmp_dir=None):
    """
    Anonymize fasta sequences

    @attention: 'shuf' is used which loads everything into memory!

    @param logfile: file handler or file path to a log file
    @type logfile: file | io.FileIO | StringIO.StringIO | str | unicode
    @param verbose: Not verbose means that only warnings and errors will be passed to stream
    @type verbose: bool
    @param debug: more output and files are kept, manual clean up required
    @type debug: bool
    @param seed: The seed written to the random_source file used by the 'shuf' command
    @type seed: long | int | float | str | unicode
    @param tmp_dir: directory for temporary files, like the random_source file for 'shuf'
    @type tmp_dir: str | unicode

    @return: None
    @rtype: None
    """
    assert isinstance(verbose, bool)
    assert isinstance(debug, bool)
    # `long` and `basestring` are Python 2 built-ins (this project targets Python 2).
    assert seed is None or isinstance(seed, (long, int, float, basestring))
    assert tmp_dir is None or isinstance(tmp_dir, basestring)

    if tmp_dir is not None:
        assert self.validate_dir(tmp_dir)
    else:
        tmp_dir = tempfile.gettempdir()
    self._tmp_dir = tmp_dir

    super(FastaAnonymizer, self).__init__(logfile, verbose, debug, label="FastaAnonymizer")

    if seed is not None:
        random.seed(seed)

    script_dir = os.path.dirname(self.get_full_path(__file__))
    self._anonymizer = os.path.join(script_dir, "anonymizer.py")
    self._fastastreamer = os.path.join(script_dir, "fastastreamer.py")
    assert self.validate_file(self._anonymizer)
    assert self.validate_file(self._fastastreamer)
Example #5
Source File: populationdistribution.py From CAMISIM with Apache License 2.0 | 6 votes |
def __init__(self, logfile=None, verbose=True, debug=False, seed=None):
    """
    Initialize instance with seed

    @attention:

    @param logfile: file handler or file path to a log file
    @type logfile: basestring | file | io.FileIO | StringIO.StringIO
    @param verbose: Not verbose means that only warnings and errors will be passed to stream
    @type verbose: bool
    @param debug: If True logger will output DEBUG messages
    @type debug: bool
    @param seed: The seed used for initiation of the 'random' module
    @type seed: long | int | float | str | unicode

    @return: None
    @rtype: None
    """
    assert isinstance(verbose, bool)
    assert isinstance(debug, bool)

    super(PopulationDistribution, self).__init__(logfile, verbose, debug)

    if seed is not None:
        random.seed(seed)
Example #6
Source File: strainsimulationwrapper.py From CAMISIM with Apache License 2.0 | 6 votes |
def _get_simulate_cmd(self, directory_strains, filepath_genome, filepath_gff):
    """
    Get system command to start simulation. Change directory to the strain directory
    and start simulating strains.

    @param directory_strains: Directory for the simulated strains
    @type directory_strains: str | unicode
    @param filepath_genome: Genome to get simulated strains of
    @type filepath_genome: str | unicode
    @param filepath_gff: gff file with gene annotations
    @type filepath_gff: str | unicode

    @return: System command line
    @rtype: str
    """
    cmd_run_simujobrun = "cd {dir}; {executable} {filepath_genome} {filepath_gff} {seed}" + " >> {log}"
    cmd = cmd_run_simujobrun.format(
        dir=directory_strains,
        executable=self._executable_sim,
        filepath_genome=filepath_genome,
        filepath_gff=filepath_gff,
        seed=self._get_seed(),
        log=os.path.join(directory_strains, os.path.basename(filepath_genome) + ".sim.log")
    )
    return cmd
Example #7
Source File: demo_sampler_wrapper.py From robosuite with MIT License | 6 votes |
def sample(self):
    """
    This is the core sampling method. Samples a state from a
    demonstration, in accordance with the configuration.
    """
    # chooses a sampling scheme randomly based on the mixing ratios
    seed = random.uniform(0, 1)
    ratio = np.cumsum(self.scheme_ratios)
    ratio = ratio > seed
    for i, v in enumerate(ratio):
        if v:
            break

    sample_method = getattr(self, self.sample_method_dict[self.sampling_schemes[i]])
    return sample_method()
Example #8
Source File: utils.py From tpu_pretrain with Apache License 2.0 | 6 votes |
def init(args):
    # init logger
    log_format = '%(asctime)-10s: %(message)s'
    if args.log_file is not None and args.log_file != "":
        Path(args.log_file).parent.mkdir(parents=True, exist_ok=True)
        logging.basicConfig(level=logging.INFO, filename=args.log_file,
                            filemode='w', format=log_format)
        logging.warning(f'This will get logged to file: {args.log_file}')
    else:
        logging.basicConfig(level=logging.INFO, format=log_format)

    # create output dir
    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    assert 'bert' in args.output_dir.name, \
        '''Output dir name has to contain `bert` or `roberta` for AutoModel.from_pretrained to correctly infer the model type'''
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # set random seeds
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
Example #9
Source File: run_dqn_lander.py From cs294-112_hws with MIT License | 6 votes |
def lander_learn(env, session, num_timesteps, seed):
    optimizer = lander_optimizer()
    stopping_criterion = lander_stopping_criterion(num_timesteps)
    exploration_schedule = lander_exploration_schedule(num_timesteps)

    dqn.learn(
        env=env,
        session=session,
        exploration=lander_exploration_schedule(num_timesteps),
        stopping_criterion=lander_stopping_criterion(num_timesteps),
        double_q=True,
        **lander_kwargs()
    )
    env.close()
Example #10
Source File: random_robot.py From vergeml with MIT License | 5 votes |
def random_robot_name(moment_of_birth, dest=None):
    """Construct a random robot name."""
    random.seed(moment_of_birth)
    adj = random.choice(_ADJ)
    noun = random.choice(_NOUN)
    name = "{}-{}".format(adj, noun).lower()
    if dest and os.path.exists(os.path.join(dest, name)):
        return random_robot_name(datetime.now(), dest)
    return name
Example #11
Source File: random_robot.py From vergeml with MIT License | 5 votes |
def ascii_robot(moment_of_birth, name, include_phrase=True):
    """Generate random robot ascii art."""
    random.seed(moment_of_birth)

    def paste(cur, *lines):
        cur = list(cur)
        for line in lines:
            if len(line) > len(cur):
                cur += [' '] * (len(line) - len(cur))
            for i, char in enumerate(line):
                if char != " ":
                    cur[i] = char
        return "".join(cur)

    if random.choice([2, 3]) == 2:
        top = random.choice(ANTENNA)
        head = paste(random.choice(EYES), random.choice(EARS))
        body = paste("", random.choice(ARMS), random.choice(SIDES), random.choice(CENTER))
        bottom = random.choice(FEET)
    else:
        top = random.choice(ANTENNA_3)
        head = paste(random.choice(EYES_3), random.choice(EARS_3))
        if random.choice(["thin", "thick"]) == "thick":
            body = paste("", random.choice(ARMS_3_THICK), random.choice(SIDES_3_THICK), random.choice(CENTER_3_THICK))
        else:
            body = paste("", random.choice(ARMS_3_THIN), random.choice(SIDES_3_THIN), random.choice(CENTER_3_THIN))
        bottom = random.choice(FEET3)

    if include_phrase:
        phrase = random_phrase(name)
        bottom = paste(bottom, ' - {}'.format(phrase))

    return "\n".join(map(lambda part: " " + part, [top, head, body, bottom]))
Example #12
Source File: libraries.py From vergeml with MIT License | 5 votes |
def setup(env):
    import tensorflow  # pylint: disable=E0401
    tensorflow.logging.set_verbosity(tensorflow.logging.ERROR)
    tensorflow.set_random_seed(env.get('random-seed'))
Example #13
Source File: libraries.py From vergeml with MIT License | 5 votes |
def setup(env):
    import torch  # pylint: disable=E0401
    import torch.cuda  # pylint: disable=E0401
    torch.manual_seed(env.get('random-seed'))
    torch.cuda.manual_seed(env.get('random-seed'))
Example #14
Source File: libraries.py From vergeml with MIT License | 5 votes |
def setup(env):
    import numpy
    numpy.random.seed(env.get('random-seed'))
Example #15
Source File: util.py From EDeN with MIT License | 5 votes |
def random_bipartition(int_range, relative_size=.7, random_state=None):
    """random_bipartition."""
    if not random_state:
        random_state = random.random()
    random.seed(random_state)
    ids = list(range(int_range))
    random.shuffle(ids)
    split_point = int(int_range * relative_size)
    return ids[:split_point], ids[split_point:]
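
A small usage sketch (values chosen arbitrarily) for the helper above: with int_range=10 and relative_size=.7, the split point is int(10 * 0.7) = 7, so the two id lists have lengths 7 and 3.

# Illustrative call: bipartition indices 0..9 into ~70% / ~30%.
train_ids, test_ids = random_bipartition(10, relative_size=.7, random_state=42)
print(len(train_ids), len(test_ids))  # 7 3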
Example #16
Source File: ml.py From EDeN with MIT License | 5 votes |
def random_bipartition(int_range, relative_size=.7, random_state=None):
    """random_bipartition."""
    if not random_state:
        random_state = random.random()
    random.seed(random_state)
    # Python 2: range() returns a list, so it can be shuffled in place;
    # Python 3 would need list(range(int_range)), as in the previous example.
    ids = range(int_range)
    random.shuffle(ids)
    split_point = int(int_range * relative_size)
    return ids[:split_point], ids[split_point:]
Example #17
Source File: cyber.py From cyberdisc-bot with MIT License | 5 votes |
def generatebase64(seed: int) -> str:
    random.seed(seed)
    letters = string.ascii_letters + string.digits + "+/="
    return "".join(random.choices(letters, k=20))
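
A short usage sketch (seed chosen arbitrarily): because the generator is re-seeded on every call, the same seed always produces the same 20-character token.

token_a = generatebase64(1337)
token_b = generatebase64(1337)
assert token_a == token_b
assert len(token_a) == 20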
Example #18
Source File: smatch.py From smatch with MIT License | 5 votes |
def random_init_mapping(candidate_mapping):
    """
    Generate a random node mapping.
    Args:
        candidate_mapping: candidate node match list
    Returns:
        randomly-generated node mapping between two AMRs

    """
    # if needed, a fixed seed could be passed here to generate same random (to help debugging)
    random.seed()
    matched_dict = {}
    result = []
    for c in candidate_mapping:
        candidates = list(c)
        if not candidates:
            # -1 indicates no possible mapping
            result.append(-1)
            continue
        found = False
        while candidates:
            # randomly generate an index in [0, length of candidates)
            rid = random.randint(0, len(candidates) - 1)
            candidate = candidates[rid]
            # check if it has already been matched
            if candidate in matched_dict:
                candidates.pop(rid)
            else:
                matched_dict[candidate] = 1
                result.append(candidate)
                found = True
                break
        if not found:
            result.append(-1)
    return result
Example #19
Source File: build.py From DDPAE-video-prediction with MIT License | 5 votes |
def build(is_train, tb_dir=None):
    '''
    Parse arguments, setup logger and tensorboardX directory.
    '''
    opt, log = args.TrainArgs().parse() if is_train else args.TestArgs().parse()
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
    os.makedirs(opt.ckpt_path, exist_ok=True)

    # Set seed
    torch.manual_seed(666)
    torch.cuda.manual_seed_all(666)
    np.random.seed(666)
    random.seed(666)

    logger = Logger(opt.ckpt_path, opt.split)
    if tb_dir is not None:
        tb_path = os.path.join(opt.ckpt_path, tb_dir)
        vis = Visualizer(tb_path)
    else:
        vis = None

    logger.print(log)
    return opt, logger, vis
Example #20
Source File: GameOfLife.py From BiblioPixelAnimations with MIT License | 5 votes |
def genNewTable(self):
    self.table = []
    random.seed(time.time())
    for y in range(0, self.height):
        self.table.append([])
        for x in range(0, self.width):
            rand = random.randint(0, self._rand_max)
            if rand == 0:
                self.table[y].append(1)
            else:
                self.table[y].append(0)
Example #21
Source File: GameOfLife.py From BiblioPixelAnimations with MIT License | 5 votes |
def genNewTable(self):
    self.table = []
    random.seed(time.time())
    for z in range(self.depth):
        self.table.append([])
        for y in range(0, self.height):
            self.table[z].append([])
            for x in range(0, self.width):
                rand = random.randint(0, self._rand_max)
                if rand == 0:
                    self.table[z][y].append(1)
                else:
                    self.table[z][y].append(0)
Example #22
Source File: builder.py From mmdetection with Apache License 2.0 | 5 votes |
def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals to
    # num_worker * rank + worker_id + user_seed
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
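
A sketch of how a per-worker seeding function like this is commonly attached to a PyTorch DataLoader via functools.partial. The toy dataset and argument values are illustrative, not mmdetection's actual builder code, and the worker_init_fn defined above is assumed to be in scope.

import random
from functools import partial

import numpy as np                        # used inside worker_init_fn
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(8))  # toy dataset for illustration
init_fn = partial(worker_init_fn, num_workers=2, rank=0, seed=12345)
loader = DataLoader(dataset, batch_size=2, num_workers=2, worker_init_fn=init_fn)
# Each worker process ends up seeded with num_workers * rank + worker_id + seed.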
Example #23
Source File: mutator.py From sandsifter with BSD 3-Clause "New" or "Revised" License | 5 votes |
def rand_byte():
    return chr(random.randint(0, 255))

# generate an approximate seed instruction
# it is probably fine to just randomize the whole thing
Example #24
Source File: mutator.py From sandsifter with BSD 3-Clause "New" or "Revised" License | 5 votes |
def init_mutator():
    random.seed()
    for i in range(1, SEEDS):
        s = insn()
        s.raw = generate_seed()
        q.append(s)
Example #25
Source File: generate.py From subword-qac with MIT License | 5 votes |
def main(args):
    logger.info(f"Args: {json.dumps(args.__dict__, indent=2, sort_keys=True)}")

    spm_path = os.path.join('spm', args.spm, "spm.model")
    logger.info(f"Loading tokenizer from {spm_path}")
    tokenizer = Tokenizer(spm_path)
    args.ntoken = ntoken = len(tokenizer)
    args.branching_factor = min([args.branching_factor, args.ntoken])
    logger.info(f" Vocab size: {ntoken}")

    n_queries_str = f"{f'only {args.n_queries} samples' if args.n_queries else 'all'} quries from"
    logger.info(f"Reading a dataset ({n_queries_str} test.query.txt)")
    seen_set = set(read_data(os.path.join(args.data_dir, "train.query.txt"), min_len=args.min_len))
    test_data = read_data(os.path.join(args.data_dir, "test.query.txt"), min_len=args.min_len)
    if args.n_queries:
        random.seed(args.seed)
        test_data = random.sample(test_data, args.n_queries)
    n_seen_test_data = len([x for x in test_data if x in seen_set])
    n_unseen_test_data = len(test_data) - n_seen_test_data
    logger.info(f" Number of test data: {len(test_data):8d} (seen {n_seen_test_data}, unseen {n_unseen_test_data})")

    logger.info(f"Loading model from {args.model_dir}")
    model = model_load(args.model_dir)
    model = model.to(device)

    logger.info('Generation starts!')
    with torch.no_grad():
        generate(model, tokenizer, test_data, args, seen_set=seen_set, calc_mrl=args.calc_mrl)
Example #26
Source File: validate_and_copy_submissions.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main(args):
    random.seed()
    temp_dir = tempfile.mkdtemp()
    logging.info('Created temporary directory: %s', temp_dir)
    validator = SubmissionValidator(
        source_dir=args.source_dir,
        target_dir=args.target_dir,
        temp_dir=temp_dir,
        do_copy=args.copy,
        use_gpu=args.use_gpu,
        containers_file=args.containers_file)
    validator.run()
    logging.info('Deleting temporary directory: %s', temp_dir)
    subprocess.call(['rm', '-rf', temp_dir])
Example #27
Source File: worker.py From neural-fingerprinting with BSD 3-Clause "New" or "Revised" License | 5 votes |
def main(args):
    """Main function which runs worker."""
    title = '## Starting evaluation of round {0} ##'.format(args.round_name)
    logging.info('\n'
                 + '#' * len(title) + '\n'
                 + '#' * len(title) + '\n'
                 + '##' + ' ' * (len(title)-2) + '##' + '\n'
                 + title + '\n'
                 + '#' * len(title) + '\n'
                 + '#' * len(title) + '\n'
                 + '##' + ' ' * (len(title)-2) + '##' + '\n')
    if args.blacklisted_submissions:
        logging.warning('BLACKLISTED SUBMISSIONS: %s',
                        args.blacklisted_submissions)
    random.seed()
    logging.info('Running nvidia-docker to ensure that GPU works')
    shell_call(['docker', 'run', '--runtime=nvidia', '--rm', 'nvidia/cuda',
                'nvidia-smi'])
    eval_worker = EvaluationWorker(
        worker_id=args.worker_id,
        storage_client=eval_lib.CompetitionStorageClient(
            args.project_id, args.storage_bucket),
        datastore_client=eval_lib.CompetitionDatastoreClient(
            args.project_id, args.round_name),
        storage_bucket=args.storage_bucket,
        round_name=args.round_name,
        dataset_name=args.dataset_name,
        blacklisted_submissions=args.blacklisted_submissions,
        num_defense_shards=args.num_defense_shards)
    eval_worker.run_work()
Example #28
Source File: argumenthandler.py From CAMISIM with Apache License 2.0 | 5 votes |
def _read_options(self, options):
    """
    Read passed arguments.

    @rtype: None
    """
    if not self._validator.validate_file(options.config_file, key='-c'):
        self._valid_arguments = False
        return
    self._file_path_config = self._validator.get_full_path(options.config_file)
    self._verbose = not options.silent
    self._debug = options.debug_mode
    self._phase = options.phase
    self._dataset_id = options.data_set_id
    self._max_processors = options.max_processors
    self._seed = options.seed
    # self._directory_output = options.output_directory
    # self._sample_size_in_base_pairs = options.sample_size_gbp
    # if self._sample_size_in_base_pairs is not None:
    #     self._sample_size_in_base_pairs = long(options.sample_size_gbp * self._base_pairs_multiplication_factor)
    # self.read_simulator = options.read_simulator
    # self._error_profile = options.error_profile
    # self._fragment_size_standard_deviation_in_bp = options.fragment_size_standard_deviation
    # self._fragments_size_mean_in_bp = options.fragments_size_mean
    # self.plasmid_file = options.plasmid_file
    # self._number_of_samples = options.number_of_samples
    # self._phase_pooled_gsa = options.pooled_gsa
Example #29
Source File: create_joint_gs.py From CAMISIM with Apache License 2.0 | 5 votes |
def parse_options():
    """
    parse the command line options
    """
    parser = argparse.ArgumentParser()

    helptext = "Root path of input runs to be considered, can be one or multiple CAMISIM runs (if more than one, they are required to have the same random seed/genome mapping)\nSample folder names are expected to follow this schema: yyyy.mm.dd_hh.mm.ss_sample_"
    parser.add_argument("-i", "--input-runs", type=str, help=helptext, nargs='+')

    helptext = "Samples to be considered for pooled gold standards. If none are provided, pooled gold standard is created over all samples"
    parser.add_argument("-s", "--samples", type=int, help=helptext, nargs='*')

    helptext = "Output directory for all gold standards and files"
    parser.add_argument("-o", "--output-directory", type=str, help=helptext)

    helptext = "Number of threads to be used, default 1"
    parser.add_argument("-t", "--threads", type=int, default=1, help=helptext)

    helptext = "Path to the bamToGold perl script"
    parser.add_argument("-b", "--bamToGold", type=str, help=helptext)

    helptext = "Seed for the random number generator for shuffling"
    parser.add_argument("--seed", type=int, default=None, help=helptext)

    helptext = "Anonymize and shuffle the contigs?"
    parser.add_argument("-a", "--shuffle_anonymize", type=bool, default=True, help=helptext)

    if not len(sys.argv) > 1:
        parser.print_help()
        return None
    args = parser.parse_args()
    return args
Example #30
Source File: strainselector.py From CAMISIM with Apache License 2.0 | 5 votes |
def __init__(
        self,
        column_name_genome_id="genome_ID",
        column_name_otu="OTU",
        column_name_novelty_category="novelty_category",
        logfile=None, verbose=True, debug=False, seed=None):
    """
    @param column_name_genome_id: Column name for genome ids
    @type column_name_genome_id: str
    @param column_name_otu: Column name for
    @type column_name_otu: str
    @param column_name_novelty_category: Column name for
    @type column_name_novelty_category: str
    @param logfile: file handler or file path to a log file
    @type logfile: file | FileIO | StringIO | basestring
    @param verbose: Not verbose means that only warnings and errors will be passed to stream
    @type verbose: bool
    @param debug: Display debug messages
    @type debug: bool
    @param seed: Seed for random module

    @return: Nothing
    @rtype: None
    """
    super(StrainSelector, self).__init__(logfile=logfile, verbose=verbose, debug=debug)
    assert isinstance(column_name_genome_id, basestring)
    assert isinstance(column_name_otu, basestring)
    assert isinstance(column_name_novelty_category, basestring)
    if seed is not None:
        random.seed(seed)
    self._cats = 0
    self._draw = 0
    self._per_cat = 0
    self._rest = 0
    self._per_otu = 0
    self._column_name_genome_id = column_name_genome_id
    self._column_name_otu = column_name_otu
    self._column_name_novelty_category = column_name_novelty_category