Python glob.glob() Examples
The following are 30 code examples of glob.glob(), collected from open-source projects. The source file, project, and license are noted above each example.
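Before the examples, a minimal sketch of the glob.glob() behaviors they all rely on (directory and file names here are hypothetical):

import glob
import os

# Plain wildcards match within a single directory level.
txt_files = sorted(glob.glob(os.path.join('data', '*.txt')))

# '**' descends into subdirectories only when recursive=True;
# without it, '**' behaves like a plain '*'.
notebooks = glob.glob(os.path.join('data', '**', '*.ipynb'), recursive=True)

# Character classes follow fnmatch rules: ranges like [0-9] work,
# and [!x] (not [^x]) is the documented negation syntax.
shards = glob.glob('nq-train-0[0-9].jsonl.gz')

Note that glob.glob() returns paths in arbitrary order, which is why most of the examples below wrap it in sorted().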

Example #1
Source File: get_data.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def get_cifar10(data_dir):
    if not os.path.isdir(data_dir):
        os.system("mkdir " + data_dir)
    cwd = os.path.abspath(os.getcwd())
    os.chdir(data_dir)
    if (not os.path.exists('train.rec')) or \
       (not os.path.exists('test.rec')):
        import urllib, zipfile, glob
        dirname = os.getcwd()
        zippath = os.path.join(dirname, "cifar10.zip")
        urllib.urlretrieve("http://data.mxnet.io/mxnet/data/cifar10.zip", zippath)
        zf = zipfile.ZipFile(zippath, "r")
        zf.extractall()
        zf.close()
        os.remove(zippath)
        for f in glob.glob(os.path.join(dirname, "cifar", "*")):
            name = f.split(os.path.sep)[-1]
            os.rename(f, os.path.join(dirname, name))
        os.rmdir(os.path.join(dirname, "cifar"))
    os.chdir(cwd)

# data
Example #2
Source File: misc.py From disentangling_conditional_gans with MIT License
def locate_result_subdir(run_id_or_result_subdir):
    if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
        return run_id_or_result_subdir
    searchdirs = []
    searchdirs += ['']
    searchdirs += ['results']
    searchdirs += ['networks']

    for searchdir in searchdirs:
        dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
        dir = os.path.join(dir, str(run_id_or_result_subdir))
        if os.path.isdir(dir):
            return dir
        prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
        dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
        dirs = [dir for dir in dirs if os.path.isdir(dir)]
        if len(dirs) == 1:
            return dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
Example #3
Source File: test_notebooks_single_gpu.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_completeness(self):
    """
    Make sure that every tutorial that isn't in the whitelist is considered
    for testing by this file. Exceptions should be added to the whitelist.
    N.B. If the test is commented out, then that will be viewed as an
    intentional disabling of the test.
    """
    # Open up this test file.
    with open(__file__, 'r') as f:
        notebook_test_text = '\n'.join(f.readlines())

    notebooks_path = os.path.join(os.path.dirname(__file__), 'straight_dope_book')
    notebooks = glob.glob(os.path.join(notebooks_path, '**', '*.ipynb'))

    # Compile a list of notebooks that are tested
    tested_notebooks = set(re.findall(r"assert _test_notebook\('(.*)'\)", notebook_test_text))

    # Ensure each notebook in the straight dope book directory is on the whitelist or is tested.
    for notebook in notebooks:
        friendly_name = '/'.join(notebook.split('/')[-2:]).split('.')[0]
        if friendly_name not in tested_notebooks and friendly_name not in NOTEBOOKS_WHITELIST:
            assert False, friendly_name + " has not been added to the nightly/tests/straight_" + \
                          "dope/test_notebooks_single_gpu.py test_suite. Consider also adding " + \
                          "it to nightly/tests/straight_dope/test_notebooks_multi_gpu.py as " + \
                          "well if the notebooks makes use of multiple GPUs."
Example #4
Source File: test_sanity_tutorials.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_tutorial_downloadable():
    """
    Make sure that every tutorial that isn't in the whitelist has the
    placeholder that enables notebook download
    """
    download_button_string = '<!-- INSERT SOURCE DOWNLOAD BUTTONS -->'

    tutorial_path = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', 'tutorials')
    tutorials = glob.glob(os.path.join(tutorial_path, '**', '*.md'))

    for tutorial in tutorials:
        with open(tutorial, 'r') as file:
            lines = file.readlines()
        last = lines[-1]
        second_last = lines[-2]
        downloadable = download_button_string in last or download_button_string in second_last
        friendly_name = '/'.join(tutorial.split('/')[-2:])
        if not downloadable and friendly_name not in whitelist_set:
            print(last, second_last)
            assert False, "{} is missing <!-- INSERT SOURCE DOWNLOAD BUTTONS --> as its last line".format(friendly_name)
Example #5
Source File: test_gluon_utils.py From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_multiprocessing_download_successful():
    """ test download with multiprocessing """
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'README.md')
    process_list = []
    # test it with 10 processes
    for i in range(10):
        process_list.append(mp.Process(
            target=_download_successful, args=(tmpfile,)))
        process_list[i].start()
    for i in range(10):
        process_list[i].join()
    assert os.path.getsize(tmpfile) > 100, os.path.getsize(tmpfile)
    # check only one file we want left
    pattern = os.path.join(tmp, 'README.md*')
    assert len(glob.glob(pattern)) == 1, glob.glob(pattern)
    # delete temp dir
    shutil.rmtree(tmp)
Example #6
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0
def convert_images2bmp():
    # cv2.imread() jpg at 230 img/s, *.bmp at 400 img/s
    for path in ['../coco/images/val2014/', '../coco/images/train2014/']:
        folder = os.sep + Path(path).name
        output = path.replace(folder, folder + 'bmp')
        if os.path.exists(output):
            shutil.rmtree(output)  # delete output folder
        os.makedirs(output)  # make new output folder

        for f in tqdm(glob.glob('%s*.jpg' % path)):
            save_name = f.replace('.jpg', '.bmp').replace(folder, folder + 'bmp')
            cv2.imwrite(save_name, cv2.imread(f))

    for label_path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
        with open(label_path, 'r') as file:
            lines = file.read()
        lines = lines.replace('2014/', '2014bmp/').replace('.jpg', '.bmp').replace(
            '/Users/glennjocher/PycharmProjects/', '../')
        with open(label_path.replace('5k', '5k_bmp'), 'w') as file:
            file.write(lines)
Example #7
Source File: simplify_nq_data.py From natural-questions with Apache License 2.0
def main(_):
    """Runs `text_utils.simplify_nq_example` over all shards of a split.

    Prints simplified examples to a single gzipped file in the same directory
    as the input shards.
    """
    split = os.path.basename(FLAGS.data_dir)
    outpath = os.path.join(FLAGS.data_dir,
                           "simplified-nq-{}.jsonl.gz".format(split))
    with gzip.open(outpath, "wb") as fout:
        num_processed = 0
        start = time.time()
        for inpath in glob.glob(os.path.join(FLAGS.data_dir, "nq-*-??.jsonl.gz")):
            print("Processing {}".format(inpath))
            with gzip.open(inpath, "rb") as fin:
                for l in fin:
                    utf8_in = l.decode("utf8", "strict")
                    utf8_out = json.dumps(
                        text_utils.simplify_nq_example(json.loads(utf8_in))) + u"\n"
                    fout.write(utf8_out.encode("utf8"))
                    num_processed += 1
                    if not num_processed % 100:
                        print("Processed {} examples in {}.".format(
                            num_processed, time.time() - start))
Example #8
Source File: train_val.py From Collaborative-Learning-for-Weakly-Supervised-Object-Detection with MIT License
def find_previous(self):
    sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth')
    sfiles = glob.glob(sfiles)
    sfiles.sort(key=os.path.getmtime)
    # Get the snapshot name in pytorch
    redfiles = []
    for stepsize in cfg.TRAIN.STEPSIZE:
        redfiles.append(os.path.join(self.output_dir,
                                     cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.pth'.format(stepsize + 1)))
    sfiles = [ss for ss in sfiles if ss not in redfiles]

    nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
    nfiles = glob.glob(nfiles)
    nfiles.sort(key=os.path.getmtime)
    redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
    nfiles = [nn for nn in nfiles if nn not in redfiles]

    lsf = len(sfiles)
    assert len(nfiles) == lsf

    return lsf, nfiles, sfiles
Example #9
Source File: test.py From DDPAE-video-prediction with MIT License
def main():
    opt, logger, vis = utils.build(is_train=False)
    dloader = data.get_data_loader(opt)
    print('Val dataset: {}'.format(len(dloader.dataset)))

    model = models.get_model(opt)

    for epoch in opt.which_epochs:
        # Load checkpoint
        if epoch == -1:
            # Find the latest checkpoint
            checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net*.pth'))
            assert len(checkpoints) > 0
            epochs = [int(filename.split('_')[-1][:-4]) for filename in checkpoints]
            epoch = max(epochs)
        logger.print('Loading checkpoints from {}, epoch {}'.format(opt.ckpt_path, epoch))
        model.load(opt.ckpt_path, epoch)

        results = evaluate(opt, dloader, model)
        for metric in results:
            logger.print('{}: {}'.format(metric, results[metric]))
Example #10
Source File: views.py From MPContribs with MIT License
def index(request):
    ctx = get_context(request)
    cname = os.environ["PORTAL_CNAME"]
    template_dir = get_app_template_dirs("templates/notebooks")[0]
    htmls = os.path.join(template_dir, cname, "*.html")
    ctx["notebooks"] = [
        p.split("/" + cname + "/")[-1].replace(".html", "") for p in glob(htmls)
    ]
    ctx["PORTAL_CNAME"] = cname
    ctx["landing_pages"] = []
    mask = ["project", "title", "authors", "is_public", "description", "urls"]
    client = Client(headers=get_consumer(request))  # sets/returns global variable
    entries = client.projects.get_entries(_fields=mask).result()["data"]
    for entry in entries:
        authors = entry["authors"].strip().split(",", 1)
        if len(authors) > 1:
            authors[1] = authors[1].strip()
        entry["authors"] = authors
        entry["description"] = entry["description"].split(".", 1)[0] + "."
        ctx["landing_pages"].append(
            entry
        )  # visibility governed by is_public flag and X-Consumer-Groups header
    return render(request, "home.html", ctx.flatten())
Example #11
Source File: dataset_tool.py From disentangling_conditional_gans with MIT License
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            assert img.shape == (218, 178, 3)
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            img = img.transpose(2, 0, 1)  # HWC => CHW
            tfr.add_image(img)

#----------------------------------------------------------------------------
Example #12
Source File: kuka_diverse_object_gym_env.py From soccer-matlab with BSD 2-Clause "Simplified" License
def _get_random_object(self, num_objects, test):
    """Randomly choose an object urdf from the random_urdfs directory.

    Args:
      num_objects: Number of graspable objects.

    Returns:
      A list of urdf filenames.
    """
    if test:
        urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*0/*.urdf')
    else:
        # note: the documented fnmatch negation syntax is [!0]; a leading '^'
        # in a character class is not guaranteed to negate.
        urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*[^0]/*.urdf')
    found_object_directories = glob.glob(urdf_pattern)
    total_num_objects = len(found_object_directories)
    selected_objects = np.random.choice(np.arange(total_num_objects), num_objects)
    selected_objects_filenames = []
    for object_index in selected_objects:
        selected_objects_filenames += [found_object_directories[object_index]]
    return selected_objects_filenames
Example #13
Source File: run_doctest.py From OpenFermion-Cirq with Apache License 2.0
def main():
    quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q'

    file_names = glob.glob('openfermion-cirq/**/*.py', recursive=True)

    failed, attempted = run_tests(file_names,
                                  include_modules=True,
                                  include_local=False,
                                  quiet=quiet)

    if failed != 0:
        print(
            shell_tools.highlight(
                f'Failed: {failed} failed, '
                f'{attempted - failed} passed, {attempted} total',
                shell_tools.RED))
        sys.exit(1)
    else:
        print(shell_tools.highlight(f'Passed: {attempted}', shell_tools.GREEN))
        sys.exit(0)
Example #14
Source File: checkpoint.py From deep-summarization with MIT License
def get_last_checkpoint(self):
    """
    Assumes that the last checkpoint has a higher checkpoint id.
    Checkpoint will be saved in this exact format: model_<checkpoint_id>.ckpt
    Eg - model_100.ckpt
    :return:
    """
    self.present_checkpoints = glob.glob(self.get_checkpoint_location() + '/*.ckpt')
    if len(self.present_checkpoints) != 0:
        present_ids = [self.__get_id(ckpt) for ckpt in self.present_checkpoints]
        # sort the IDs and return the model for the last ID
        present_ids.sort()
        self.last_id = present_ids[-1]
        self.last_ckpt = self.get_checkpoint_location() + '/model_' + \
                         str(self.last_id) + '.ckpt'
    return self.last_ckpt
Example #15
Source File: checkpoint.py From deep-summarization with MIT License
def delete_previous_checkpoints(self, num_previous=5):
    """
    Deletes all previous checkpoints that are <num_previous> before the
    present checkpoint. This is done to prevent blowing out of memory
    due to too many checkpoints.
    :param num_previous:
    :return:
    """
    self.present_checkpoints = glob.glob(self.get_checkpoint_location() + '/*.ckpt')
    if len(self.present_checkpoints) > num_previous:
        present_ids = [self.__get_id(ckpt) for ckpt in self.present_checkpoints]
        present_ids.sort()
        ids_2_delete = present_ids[0:len(present_ids) - num_previous]
        for ckpt_id in ids_2_delete:
            ckpt_file_nm = self.get_checkpoint_location() + '/model_' + str(ckpt_id) + '.ckpt'
            os.remove(ckpt_file_nm)
Example #16
Source File: datasets.py From pruning_yolov3 with GNU General Public License v3.0
def __init__(self, path, img_size=416, half=False):
    path = str(Path(path))  # os-agnostic
    files = []
    if os.path.isdir(path):
        files = sorted(glob.glob(os.path.join(path, '*.*')))
    elif os.path.isfile(path):
        files = [path]

    images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
    videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
    nI, nV = len(images), len(videos)

    self.img_size = img_size
    self.files = images + videos
    self.nF = nI + nV  # number of files
    self.video_flag = [False] * nI + [True] * nV
    self.mode = 'images'
    self.half = half  # half precision fp16 images
    if any(videos):
        self.new_video(videos[0])  # new video
    else:
        self.cap = None
    assert self.nF > 0, 'No images or videos found in ' + path
Example #17
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0
def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images
Example #18
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0
def plot_results(start=0, stop=0):  # from utils.utils import *; plot_results()
    # Plot training results files 'results*.txt'
    fig, ax = plt.subplots(2, 5, figsize=(14, 7))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP', 'F1']
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        for i in range(10):
            y = results[i, x]
            if i in [0, 1, 2, 5, 6, 7]:
                y[y == 0] = np.nan  # don't show zero loss values
            ax[i].plot(x, y, marker='.', label=f.replace('.txt', ''))
            ax[i].set_title(s[i])
            if i in [5, 6, 7]:  # share train and val loss y axes
                ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])

    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)
Example #19
Source File: utils.py From pruning_yolov3 with GNU General Public License v3.0
def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    # Plot training results files 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP', 'val', 'val', 'val', 'Recall', 'F1']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:
                y = results[j, x]
                if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # don't show zero loss values
                ax[i].plot(x, y, marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.tight_layout()
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
Example #20
Source File: io.py From vergeml with MIT License
def scan(self, path, exclude=[]) -> List[str]:
    """Scan path for matching files.

    :param path: the path to scan
    :param exclude: a list of directories to exclude

    :return: a list of sorted filenames
    """
    res = []
    path = path.rstrip("/").rstrip("\\")
    for pat in self.input_patterns:
        res.extend(glob.glob(path + os.sep + pat, recursive=True))
    res = list(filter(lambda p: os.path.isfile(p), res))
    if exclude:
        def excluded(path):
            for e in exclude:
                if path.startswith(e):
                    return True
            return False
        res = list(filter(lambda p: not excluded(p), res))
    return sorted(res)
Example #21
Source File: os_utils.py From godot-mono-builds with MIT License
def globs(pathnames, dirpath='.'):
    import glob
    files = []
    for pathname in pathnames:
        files.extend(glob.glob(os.path.join(dirpath, pathname)))
    return files
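A hypothetical call to the helper above, gathering several patterns at once (directory and file names are illustrative only):

# e.g. collect shell scripts plus any patch files under a tools directory
scripts_and_patches = globs(['*.sh', 'patches/*.diff'], dirpath='tools')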
Example #22
Source File: test.py From aegea with Apache License 2.0
def test_deploy_utils(self):
    deploy_utils_bindir = os.path.join(pkg_root, "aegea", "rootfs.skel", "usr", "bin")
    for script in glob.glob(deploy_utils_bindir + "/aegea*"):
        self.call([script, "--help"], expect=[dict(return_codes=[0, 1])])
    for script in "aegea-deploy-pilot", "aegea-git-ssh-helper":
        self.call(os.path.join(deploy_utils_bindir, script),
                  expect=[dict(return_codes=[2], stderr="(required|too few)")])
Example #23
Source File: eval_utils.py From natural-questions with Apache License 2.0
def read_annotation(path_name, n_threads=10):
    """Read annotations with real multiple processes."""
    input_paths = glob.glob(path_name)
    pool = multiprocessing.Pool(n_threads)
    try:
        dict_list = pool.map(read_annotation_from_one_split, input_paths)
    finally:
        pool.close()
        pool.join()
    final_dict = {}
    for single_dict in dict_list:
        final_dict.update(single_dict)
    return final_dict
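Since path_name is handed directly to glob.glob(), the caller passes a shard pattern rather than a directory; a hypothetical call (shard names are illustrative):

# e.g. annotations sharded as nq-dev-00.jsonl.gz, nq-dev-01.jsonl.gz, ...
annotations = read_annotation('nq-dev-??.jsonl.gz', n_threads=10)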
Example #24
Source File: pre_submission.py From MPContribs with MIT License
def run(mpfile):
    identifier = mpfile.ids[0]
    xcol, ycol = "V [V]", "J {}°C {} [mA/cm²]"
    full_df = None
    for fn in sorted(glob(os.path.join("Data", "Figure 4", "*_01_DIV.txt"))):
        with open(fn, "r") as f:
            name = os.path.splitext(os.path.basename(fn))[0]
            body = "\n".join(["\t".join([xcol, ycol]), f.read()])
            df = (
                read_csv(body, sep="\t")
                .apply(to_numeric, errors="coerce")
                .sort_values(by=[xcol])
            )
            if full_df is None:
                full_df = df[xcol].to_frame()
            offset = 0.0
            if "fwd_dB_p3" in name:
                offset = -6.70273000e-11
            elif "rev_dB_p3" in name:
                offset = 4.49694000e-10
            elif "fwd_dG_p6" in name:
                offset = -8.90037000e-11
            elif "rev_dG_p6" in name:
                offset = 8.42196000e-10
            temp = name[4:].split("CZnO", 1)[0]
            direction = "fwd" if "fwd" in name else "rev"
            col = ycol.format(temp, direction)
            full_df[col] = (df[ycol] + offset).abs() * 1000.0 / 0.045
    mpfile.add_data_table(identifier, full_df, "JV|dark")
Example #25
Source File: setup.py From django-click with MIT License
def extra_requirements(cls, glob_pattern):
    before, after = glob_pattern.split("*", 1)
    pattern = os.path.join(os.path.dirname(__file__), glob_pattern)
    requirements = {}
    for path in glob.glob(pattern):
        name = path[len(before) : -len(after)]
        requirements[name] = cls.requirements(path)
    return requirements
Example #26
Source File: dataset_tool.py From disentangling_conditional_gans with MIT License
def create_from_images(tfrecord_dir, image_dir, label_dir, shuffle):
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')

    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    try:
        with open(label_dir, 'rb') as file:
            labels = pickle.load(file)
    except:
        error('Label file was not found')

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        reordered_names = []
        for idx in range(order.size):
            image_filename = image_filenames[order[idx]]
            img = np.asarray(PIL.Image.open(image_filename))
            if channels == 1:
                img = img[np.newaxis, :, :]  # HW => CHW
            else:
                img = img.transpose(2, 0, 1)  # HWC => CHW
            tfr.add_image(img)
            reordered_names.append(os.path.basename(image_filename))
        reordered_labels = []
        for key in reordered_names:
            reordered_labels += [labels[key]]
        reordered_labels = np.stack(reordered_labels, 0)
        tfr.add_labels(reordered_labels)

#----------------------------------------------------------------------------
Example #27
Source File: misc.py From disentangling_conditional_gans with MIT License
def create_result_subdir(result_dir, desc):
    # Select run ID and create subdir.
    while True:
        run_id = 0
        for fname in glob.glob(os.path.join(result_dir, '*')):
            try:
                fbase = os.path.basename(fname)
                ford = int(fbase[:fbase.find('-')])
                run_id = max(run_id, ford + 1)
            except ValueError:
                pass

        result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc))
        try:
            os.makedirs(result_subdir)
            break
        except OSError:
            if os.path.isdir(result_subdir):
                continue
            raise

    print("Saving results to", result_subdir)
    set_output_log_file(os.path.join(result_subdir, 'log.txt'))

    # Export config.
    try:
        with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout:
            for k, v in sorted(config.__dict__.items()):
                if not k.startswith('_'):
                    fout.write("%s = %s\n" % (k, str(v)))
    except:
        pass
    return result_subdir
Example #28
Source File: misc.py From disentangling_conditional_gans with MIT License
def list_network_pkls(run_id_or_result_subdir, include_final=True):
    result_subdir = locate_result_subdir(run_id_or_result_subdir)
    pkls = sorted(glob.glob(os.path.join(result_subdir, 'network-*.pkl')))
    # 'network-final.pkl' sorts to the front; move it to the end of the list
    # (or drop it when include_final is False).
    if len(pkls) >= 1 and os.path.basename(pkls[0]) == 'network-final.pkl':
        if include_final:
            pkls.append(pkls[0])
        del pkls[0]
    return pkls
Example #29
Source File: face_attack.py From Adversarial-Face-Attack with GNU General Public License v3.0
def compute_victim(self, lfw_160_path, name):
    imgfolder = os.path.join(lfw_160_path, name)
    assert os.path.isdir(imgfolder), imgfolder
    images = glob.glob(os.path.join(imgfolder, '*.png')) + glob.glob(os.path.join(imgfolder, '*.jpg'))
    image_batch = [cv2.imread(f, cv2.IMREAD_COLOR)[:, :, ::-1] for f in images]
    for img in image_batch:
        assert img.shape[0] == 160 and img.shape[1] == 160, \
            "--data should only contain 160x160 images. Please read the README carefully."
    embeddings = self.eval_embeddings(image_batch)
    self.victim_embeddings = embeddings
    return embeddings
Example #30
Source File: __init__.py From django-template with MIT License
def import_env_vars(directory):
    """
    List the files present in the given directory and, for each of them,
    create an environment variable named after the file whose value is the
    contents of the file.
    """
    env_vars = glob.glob(os.path.join(directory, "*"))

    for env_var in env_vars:
        with open(env_var, "r") as env_var_file:
            os.environ.setdefault(
                env_var.split(os.sep)[-1], env_var_file.read().strip()
            )
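A usage sketch for the function above, with a hypothetical envdir-style layout:

# Hypothetical layout:
#   envdir/DATABASE_URL   (file contents: the connection string)
#   envdir/SECRET_KEY
import_env_vars('envdir')
# os.environ['DATABASE_URL'] now holds the file's stripped contents,
# unless the variable was already set: setdefault keeps existing values.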