Python glob.iglob() Examples

The following are 30 code examples showing how to use glob.iglob(). They are extracted from open-source projects. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.

You may want to check out the right sidebar, which shows related API usage.

You may also want to check out all available functions and classes of the module glob, or try the search function.

Example 1
Project: zun   Author: openstack   File: utils.py    License: Apache License 2.0 6 votes vote down vote up
def get_vf_num_by_pci_address(pci_addr):
    """Get the VF number based on a VF's pci address

    A VF is associated with an VF number, which ip link command uses to
    configure it. This number can be obtained from the PCI device filesystem.

    :param pci_addr: PCI address string of the VF (e.g. "0000:04:10.0")
    :returns: the VF number as a string (the N from the "virtfnN" link name)
    :raises exception.PciDeviceNotFoundById: if no matching virtfn link exists
    """
    VIRTFN_RE = re.compile(r"virtfn(\d+)")
    virtfns_path = "/sys/bus/pci/devices/%s/physfn/virtfn*" % (pci_addr)
    vf_num = None
    try:
        for vf_path in glob.iglob(virtfns_path):
            # Each virtfnN symlink under the PF points at one VF's PCI
            # address; the link that points at pci_addr gives us N.
            if re.search(pci_addr, os.readlink(vf_path)):
                match = VIRTFN_RE.search(vf_path)
                vf_num = match.group(1)
                break
    except OSError:
        # readlink can fail if the device disappears mid-scan; treat that
        # the same as "not found" instead of swallowing every exception.
        pass
    if vf_num is None:
        raise exception.PciDeviceNotFoundById(id=pci_addr)
    return vf_num
Example 2
Project: ncm2   Author: ncm2   File: ncm2_core.py    License: MIT License 6 votes vote down vote up
def load_plugin(self, _, rtp: str):
    """Scan every runtimepath entry and load any ncm2 plugins not seen before."""
    self.update_rtp(rtp)

    for rtp_dir in rtp.split(','):
        # Vimscript plugins are handed back to the editor for sourcing.
        for script in glob.iglob(path.join(rtp_dir, 'ncm2-plugin/*.vim')):
            if script in self._loaded_plugins:
                continue
            self._loaded_plugins[script] = True
            logger.info('send vimscript plugin %s', script)
            self.notify('ncm2#_load_vimscript', script)

        # Python plugins are loaded in-process.
        for py_plugin in glob.iglob(path.join(rtp_dir, 'ncm2-plugin/*.py')):
            if py_plugin in self._loaded_plugins:
                continue
            self._loaded_plugins[py_plugin] = True
            logger.info('send python plugin %s', py_plugin)
            # async_call to get multiple exceptions properly printed
            self.nvim.async_call(partial(self.load_python, _, py_plugin))

        detectors = glob.glob(path.join(rtp_dir, 'pythonx/ncm2_subscope_detector/*.py'))
        detectors += glob.glob(path.join(rtp_dir, 'python3/ncm2_subscope_detector/*.py'))
        self.load_subscope_detectors(detectors)

    self.notify('ncm2#_au_plugin')
Example 3
Project: decaNLP   Author: salesforce   File: generic.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, path, field, subsample=None, **kwargs):
        """Load (or build and cache) IMDb reviews as context/question/answer examples.

        path: root directory containing 'pos' and 'neg' subdirectories of .txt reviews.
        field: field applied to every column of each example.
        subsample: soft cap on example count. NOTE(review): the break only exits
            the inner per-label loop, so collection resumes for the next label
            and the total can exceed this value — confirm this is intended.
        **kwargs: forwarded to the imdb.IMDb dataset constructor.
        """
        # Every example column shares the same field object.
        fields = [(x, field) for x in self.fields]
        examples = []
        labels = {'neg': 'negative', 'pos': 'positive'}
        question = 'Is this review negative or positive?'

        # Cache is keyed on the data path AND the subsample size.
        cache_name = os.path.join(os.path.dirname(path), '.cache', os.path.basename(path), str(subsample))
        if os.path.exists(cache_name):
            print(f'Loading cached data from {cache_name}')
            examples = torch.load(cache_name)
        else:
            for label in ['pos', 'neg']:
                for fname in glob.iglob(os.path.join(path, label, '*.txt')):
                    # Only the first line of each review file is used as context.
                    with open(fname, 'r') as f:
                        context = f.readline()
                    answer = labels[label]
                    context_question = get_context_question(context, question) 
                    examples.append(data.Example.fromlist([context, question, answer, CONTEXT_SPECIAL, QUESTION_SPECIAL, context_question], fields))
                    if subsample is not None and len(examples) > subsample:
                        break
            os.makedirs(os.path.dirname(cache_name), exist_ok=True)
            print(f'Caching data to {cache_name}')
            torch.save(examples, cache_name)
        super(imdb.IMDb, self).__init__(examples, fields, **kwargs) 
Example 4
Project: decaNLP   Author: salesforce   File: imdb.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, path, text_field, label_field, **kwargs):
        """Create an IMDB dataset instance given a path and fields.

        Arguments:
            path: Path to the dataset's highest level directory
            text_field: The field that will be used for text data.
            label_field: The field that will be used for label data.
            Remaining keyword arguments: Passed to the constructor of
                data.Dataset.
        """
        fields = [('text', text_field), ('label', label_field)]
        examples = []

        for sentiment in ['pos', 'neg']:
            pattern = os.path.join(path, sentiment, '*.txt')
            for review_path in glob.iglob(pattern):
                # Only the first line of each review file is used.
                with open(review_path, 'r') as review_file:
                    first_line = review_file.readline()
                examples.append(data.Example.fromlist([first_line, sentiment], fields))

        super(IMDb, self).__init__(examples, fields, **kwargs)
Example 5
Project: decaNLP   Author: salesforce   File: translation.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def clean(path):
        """Strip IWSLT-style markup from the data files in *path*.

        Every ``*.xml`` file is reduced to the text of its ``<seg>`` elements
        (one per line, written next to it without the extension), and every
        ``train.tags*`` file is copied to a ``.tags``-less name with all
        XML-tagged lines removed.
        """
        for xml_file in glob.iglob(os.path.join(path, '*.xml')):
            print(xml_file)
            txt_file = os.path.splitext(xml_file)[0]
            with io.open(txt_file, mode='w', encoding='utf-8') as out:
                tree_root = ET.parse(xml_file).getroot()[0]
                for doc in tree_root.findall('doc'):
                    out.writelines(seg.text.strip() + '\n'
                                   for seg in doc.findall('seg'))

        xml_tags = ['<url', '<keywords', '<talkid', '<description',
                    '<reviewer', '<translator', '<title', '<speaker']
        for tagged_file in glob.iglob(os.path.join(path, 'train.tags*')):
            print(tagged_file)
            plain_file = tagged_file.replace('.tags', '')
            with io.open(plain_file, mode='w', encoding='utf-8') as out, \
                    io.open(tagged_file, mode='r', encoding='utf-8') as src:
                # Drop any line containing one of the known markup tags.
                out.writelines(line.strip() + '\n' for line in src
                               if not any(tag in line for tag in xml_tags))
Example 6
Project: seamseg   Author: mapillary   File: dataset.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, in_dir, transform):
        super(ISSTestDataset, self).__init__()
        self.in_dir = in_dir
        self.transform = transform

        # Collect every image whose name matches one of the supported
        # extension patterns, searching recursively under in_dir.
        self._images = []
        for img_path in chain.from_iterable(
                glob.iglob(path.join(self.in_dir, '**', ext), recursive=True)
                for ext in ISSTestDataset._EXTENSIONS):
            basename = path.split(img_path)[1]
            idx = path.splitext(basename)[0]

            # Record size as (height, width) without keeping the image open.
            with Image.open(img_path) as img_raw:
                size = (img_raw.size[1], img_raw.size[0])

            self._images.append({
                "idx": idx,
                "path": img_path,
                "size": size,
            })
Example 7
Project: traces   Author: datascopeanalytics   File: helloworld.py    License: MIT License 6 votes vote down vote up
def read_all(pattern='data/lightbulb-*.csv'):
    """Load every CSV matching *pattern* as a compacted traces.TimeSeries.

    Returns the list of TimeSeries, one per matched file.
    """
    series_list = []
    for csv_path in glob.iglob(pattern):
        print('reading', csv_path, file=sys.stderr)
        series = traces.TimeSeries.from_csv(
            csv_path,
            time_column=0,
            time_transform=parse_iso_datetime,
            value_column=1,
            value_transform=int,
            default=0,
        )
        # Drop redundant consecutive identical values.
        series.compact()
        series_list.append(series)
    return series_list
Example 8
Project: ec2-api   Author: openstack   File: schema_diff.py    License: Apache License 2.0 6 votes vote down vote up
def _migrate_get_earliest_version():
    """Return the lowest migration version number found in MIGRATE_REPO.

    Scans the ``versions`` directory for files named ``NNN_*.py`` and returns
    the smallest numeric prefix.

    :raises IndexError: if no migration file with a numeric prefix exists.
    """
    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')

    versions = []
    for path in glob.iglob(versions_glob):
        filename = os.path.basename(path)
        prefix = filename.split('_', 1)[0]
        try:
            version = int(prefix)
        except ValueError:
            # Skip files whose prefix is not numeric. The original code fell
            # through and appended the *previous* iteration's version (or
            # raised NameError when the first file was non-numeric).
            continue
        versions.append(version)

    versions.sort()
    return versions[0]


### Git 
Example 9
Project: typhon   Author: atmtools   File: fileset.py    License: MIT License 6 votes vote down vote up
def _get_matching_files(self, path, regex, start, end,):
        """Yield files that matches the search conditions.

        Args:
            path: Path to the directory that contains the files that should be
                checked.
            regex: A regular expression that should match the filename.
            start: Datetime that defines the start of a time interval.
            end: Datetime that defines the end of a time interval. The time
                coverage of the file should overlap with this interval.

        Yields:
            A FileInfo object with the file path and time coverage
        """
        for filename in glob.iglob(os.path.join(path, "*")):
            if not regex.match(filename):
                continue
            file_info = self.get_info(filename)
            # Keep only files whose time coverage overlaps [start, end]
            # and which are not explicitly excluded.
            overlaps = IntervalTree.interval_overlaps(
                file_info.times, (start, end))
            if overlaps and not self.is_excluded(file_info):
                yield file_info
Example 10
Project: jbox   Author: jpush   File: __init__.py    License: MIT License 6 votes vote down vote up
def convert(installers, dest_dir, verbose):
    """Convert egg / wininst installers matching the given patterns to wheels."""
    require_pkgresources('wheel convert')

    # Only support wheel convert if pkg_resources is present
    from ..wininst2wheel import bdist_wininst2wheel
    from ..egg2wheel import egg2wheel

    for pattern in installers:
        for installer in iglob(pattern):
            # Pick the converter from the file extension.
            is_egg = os.path.splitext(installer)[1] == '.egg'
            converter = egg2wheel if is_egg else bdist_wininst2wheel
            if verbose:
                sys.stdout.write("{0}... ".format(installer))
                sys.stdout.flush()
            converter(installer, dest_dir)
            if verbose:
                sys.stdout.write("OK\n")
Example 11
Project: pyDcop   Author: Orange-OpenSource   File: consolidate.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def distribution_cost(
    dcop_files: List[str], distribution_file, algo, target
):
    """Write distribution costs for every distribution matching a glob.

    For each file matched by ``distribution_file``, computes the cost of
    distributing the DCOP loaded from ``dcop_files`` with ``algo`` and appends
    a CSV row ``[dcop_file, distribution_file, cost, hosting, comm]``.

    :param dcop_files: DCOP file paths (the first entry is written to the CSV)
    :param distribution_file: glob pattern selecting distribution files
    :param algo: algorithm used for the cost computation
    :param target: writable file-like object receiving the CSV rows
    :returns: target
    """
    logger.debug(f"analyse file {dcop_files}")

    dcop = load_dcop_from_file(dcop_files)
    path_glob = os.path.abspath(os.path.expanduser(distribution_file))
    distribution_files = sorted(glob.iglob(path_glob))
    for distribution_file in distribution_files:

        try:
            cost, comm, hosting = single_distrib_costs(
                dcop, distribution_file, algo
            )

            csv_writer = csv.writer(target)
            csv_writer.writerow([dcop_files[0], distribution_file, cost, hosting, comm])
        except Exception:
            # Best-effort batch: one bad distribution file must not abort the
            # run, but a bare `except: pass` also swallowed KeyboardInterrupt
            # and hid every failure — narrow the catch and log it instead.
            logger.warning("could not compute costs for %s", distribution_file)
    return target
Example 12
Project: pyDcop   Author: Orange-OpenSource   File: batch.py    License: BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def input_files_glob(path_glob: str) -> List[str]:
    """
    Find files matching a glob expression.

    Parameters
    ----------
    path_glob: str
        unix style glob expression (e.g. '/home/user/foo/bar*.json')

    Returns
    -------

    """
    # Expand ~ and make the pattern absolute before globbing.
    expanded = os.path.abspath(os.path.expanduser(path_glob))
    logger.debug("Looking for files in %s", expanded)
    return list(glob.iglob(expanded))
Example 13
Project: fairseq   Author: pytorch   File: wav2vec_manifest.py    License: MIT License 6 votes vote down vote up
def main(args):
    """Build train/valid TSV manifests from audio files found under args.root."""
    assert args.valid_percent >= 0 and args.valid_percent <= 1.

    dir_path = os.path.realpath(args.root)
    search_path = os.path.join(dir_path, '**/*.' + args.ext)
    rand = random.Random(args.seed)

    train_path = os.path.join(args.dest, 'train.tsv')
    valid_path = os.path.join(args.dest, 'valid.tsv')
    with open(train_path, 'w') as train_f, open(valid_path, 'w') as valid_f:
        # First line of each manifest is the root directory.
        print(dir_path, file=train_f)
        print(dir_path, file=valid_f)

        for fname in glob.iglob(search_path, recursive=True):
            file_path = os.path.realpath(fname)

            if args.path_must_contain and args.path_must_contain not in file_path:
                continue

            frames = soundfile.info(fname).frames
            # Route roughly valid_percent of files into the valid manifest.
            dest = valid_f if rand.random() <= args.valid_percent else train_f
            print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest)
Example 14
Project: AUCR   Author: AUCR   File: mq.py    License: GNU General Public License v3.0 6 votes vote down vote up
def get_mq_yaml_configs():
    """MQ aucr yaml config file from each plugin."""
    # One list per section; insertion order fixes the returned dict's key order.
    sections = {"tasks": [], "reports": [], "analysis": []}
    for filename in glob.iglob('aucr_app/plugins/**/mqtasks.yml', recursive=True):
        run = YamlInfo(filename, "none", "none").get()
        for section, collected in sections.items():
            if section in run:
                collected.append(run[section])
    return sections
Example 15
Project: AUCR   Author: AUCR   File: utils.py    License: GNU General Public License v3.0 6 votes vote down vote up
def get_group_permission_navbar():
    """Return group nav list from database.

    Builds a navbar dictionary (currently only the "main" section) from every
    plugin's navbar.yml, filtered by the groups the current user belongs to.
    """
    # Fall back to user id 1 when no user is logged in.
    if current_user:
        current_user_id = current_user.id
    else:
        current_user_id = 1
    # NOTE(review): user_groups_ids is a module-level dict mutated here —
    # shared state, not thread-safe as written; confirm this is intended.
    user_groups_ids["items"] = Group.query.filter_by(username_id=current_user_id).all()
    user_groups_links = {}
    main_list = []
    for items in user_groups_ids["items"]:
        group_object = Groups.query.filter_by(id=items.groups_id).first()
        # Scan every plugin's navbar.yml for entries visible to this group.
        for filename in glob.iglob('aucr_app/plugins/**/navbar.yml', recursive=True):
            menu_links = YamlInfo(filename, "none", "none")
            run = menu_links.get()
            main_result_list = generate_navbar_list_item("main", run, group_object.name, main_list)
            if main_result_list:
                main_list = main_result_list
    # Used to make sure empty lists are not populating the navbar dictionary
    if main_list:
        user_groups_links["main"] = main_list
    return user_groups_links 
Example 16
Project: auto-alt-text-lambda-api   Author: abhisuri97   File: __init__.py    License: MIT License 6 votes vote down vote up
def convert(installers, dest_dir, verbose):
    """Convert matched .egg / wininst installer files into wheels."""
    require_pkgresources('wheel convert')

    # Only support wheel convert if pkg_resources is present
    from ..wininst2wheel import bdist_wininst2wheel
    from ..egg2wheel import egg2wheel

    # Dispatch on extension: .egg files get egg2wheel, everything else wininst.
    converters = {'.egg': egg2wheel}
    for pat in installers:
        for installer in iglob(pat):
            extension = os.path.splitext(installer)[1]
            conv = converters.get(extension, bdist_wininst2wheel)
            if verbose:
                sys.stdout.write("{0}... ".format(installer))
                sys.stdout.flush()
            conv(installer, dest_dir)
            if verbose:
                sys.stdout.write("OK\n")
Example 17
Project: How_to_generate_music_in_tensorflow_LIVE   Author: llSourcell   File: utils.py    License: Apache License 2.0 6 votes vote down vote up
def convert_midi2mp3():
    """ Convert all midi files of the given directory to mp3

    Reads every .mid under docs/midi/ (recursively) and writes a .mp3 with the
    same base name into docs/mp3/, piping timidity output through ffmpeg.
    """
    import shlex  # local import: quote filenames for the shell pipeline below

    input_dir = 'docs/midi/'
    output_dir = 'docs/mp3/'

    assert os.path.exists(input_dir)
    os.makedirs(output_dir, exist_ok=True)

    print('Converting:')
    i = 0
    for filename in glob.iglob(os.path.join(input_dir, '**/*.mid'), recursive=True):
        print(filename)
        in_name = filename
        out_name = os.path.join(output_dir, os.path.splitext(os.path.basename(filename))[0] + '.mp3')
        # Quote both paths so filenames containing spaces or shell
        # metacharacters cannot break (or hijack) the shell pipeline.
        command = 'timidity {} -Ow -o - | ffmpeg -i - -acodec libmp3lame -ab 64k {}'.format(
            shlex.quote(in_name), shlex.quote(out_name))  # TODO: Redirect stdout to avoid polluting the screen (have cleaner printing)
        subprocess.call(command, shell=True)
        i += 1
    print('{} files converted.'.format(i))
Example 18
def windows_package(args):
    """Package build artifacts (libs, dlls, headers, python bindings) into windows_package.7z.

    args.output: directory scanned recursively for *.lib and *.dll files.
    Side effects: changes the working directory (restored by remember_cwd),
    creates the windows_package tree, and shells out to the 7z executable.
    """
    pkgfile = 'windows_package.7z'
    pkgdir = os.path.abspath('windows_package')
    logging.info("Packaging libraries and headers in package: %s", pkgfile)
    j = os.path.join
    pkgdir_lib = os.path.abspath(j(pkgdir, 'lib'))
    with remember_cwd():
        os.chdir(args.output)
        logging.info("Looking for static libraries and dlls in: \"%s", os.getcwd())
        # Recursively collect every static library and dll from the build output.
        libs = list(glob.iglob('**/*.lib', recursive=True))
        dlls = list(glob.iglob('**/*.dll', recursive=True))
        os.makedirs(pkgdir_lib, exist_ok=True)
        for lib in libs:
            logging.info("packing lib: %s", lib)
            shutil.copy(lib, pkgdir_lib)
        for dll in dlls:
            logging.info("packing dll: %s", dll)
            shutil.copy(dll, pkgdir_lib)
        os.chdir(get_mxnet_root())
        logging.info('packing python bindings')
        copy_tree('python', j(pkgdir, 'python'))
        logging.info('packing headers')
        copy_tree('include', j(pkgdir, 'include'))
        # Headers from vendored 3rdparty dependencies are merged into include/.
        copy_tree(j('3rdparty','dmlc-core','include'), j(pkgdir, 'include'))
        copy_tree(j('3rdparty','mshadow', 'mshadow'), j(pkgdir, 'include', 'mshadow'))
        copy_tree(j('3rdparty','tvm','nnvm', 'include'), j(pkgdir,'include', 'nnvm', 'include'))
        logging.info("Compressing package: %s", pkgfile)
        check_call(['7z', 'a', pkgfile, pkgdir]) 
Example 19
Project: Paradrop   Author: ParadropLabs   File: procmon.py    License: Apache License 2.0 5 votes vote down vote up
def check(self):
        """
        Check that the service is running and consistent with pid file(s).

        Returns True or False.
        """
        # pids (as strings) of running processes whose name contains the
        # command string we are watching for.
        detected_pids = set()
        for proc in psutil.process_iter():
            try:
                if self.cmdstring in proc.name():
                    detected_pids.add(str(proc.pid))
            except psutil.NoSuchProcess:
                # Process exited between iteration and the name() lookup.
                # We could also get psutil.ZombieProcess or
                # psutil.AccessDenied; we want those to be logged.
                pass

        # pids that are both running AND recorded in a pid file.
        consistent_pids = set()
        for pidfile in self.pidfiles:
            for pid_path in glob.iglob(pidfile):
                with open(pid_path, 'r') as source:
                    recorded_pid = source.read().strip()

                if recorded_pid in detected_pids:
                    consistent_pids.add(recorded_pid)
                else:
                    # Delete the stale pid file.
                    os.remove(pid_path)

        return len(consistent_pids) > 0
Example 20
Project: calmjs   Author: calmjs   File: indexer.py    License: GNU General Public License v2.0 5 votes vote down vote up
def globber_root(root, patt):
    """Return an iterator over paths under *root* matching the glob *patt*."""
    full_pattern = join(root, patt)
    return iglob(full_pattern)
Example 21
Project: tensorrt-utils   Author: rmccorm4   File: ImagenetCalibrator.py    License: Apache License 2.0 5 votes vote down vote up
def get_calibration_files(calibration_data, max_calibration_size=None, allowed_extensions=(".jpeg", ".jpg", ".png")):
    """Returns a list of all filenames ending with `allowed_extensions` found in the `calibration_data` directory.

    Parameters
    ----------
    calibration_data: str
        Path to directory containing desired files.
    max_calibration_size: int
        Max number of files to use for calibration. If calibration_data contains more than this number,
        a random sample of size max_calibration_size will be returned instead. If None, all samples will be used.

    Returns
    -------
    calibration_files: List[str]
         List of filenames contained in the `calibration_data` directory ending with `allowed_extensions`.
    """

    logger.info("Collecting calibration files from: {:}".format(calibration_data))
    search_pattern = os.path.join(calibration_data, "**")
    calibration_files = []
    for candidate in glob.iglob(search_pattern, recursive=True):
        # Keep regular files with one of the allowed extensions (case-insensitive).
        if os.path.isfile(candidate) and candidate.lower().endswith(allowed_extensions):
            calibration_files.append(candidate)
    logger.info("Number of Calibration Files found: {:}".format(len(calibration_files)))

    if not calibration_files:
        raise Exception("ERROR: Calibration data path [{:}] contains no files!".format(calibration_data))

    if max_calibration_size and len(calibration_files) > max_calibration_size:
        logger.warning("Capping number of calibration images to max_calibration_size: {:}".format(max_calibration_size))
        random.seed(42)  # Set seed for reproducibility
        calibration_files = random.sample(calibration_files, max_calibration_size)

    return calibration_files
Example 22
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
    """Build a class decorator that attaches one test method per JSON suite case."""
    if ignore_glob:
        ignore_glob = os.path.join(basedir, ignore_glob)

    def add_test_methods(test_class):
        ignored = set(glob.iglob(ignore_glob))

        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
            if filename in ignored:
                continue

            # Method names combine the file name with a running counter.
            validating, _ = os.path.splitext(os.path.basename(filename))
            counter = itertools.count(1)

            with open(filename) as test_file:
                cases = json.load(test_file)

            for case in cases:
                for test in case["tests"]:
                    name = "test_%s_%s_%s" % (
                        validating,
                        next(counter),
                        re.sub(r"[\W ]+", "_", test["description"]),
                    )
                    # Guard against two cases producing the same method name.
                    assert not hasattr(test_class, name), name

                    method = make_case(
                        data=test["data"],
                        schema=case["schema"],
                        valid=test["valid"],
                        name=name,
                    )
                    method = maybe_skip(skip, method, case, test)
                    setattr(test_class, name, method)

        return test_class
    return add_test_methods
Example 23
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
    """Build a class decorator that attaches one test method per JSON suite case.

    tests_glob: glob (relative to basedir) of JSON suite files to load.
    ignore_glob: optional glob (relative to basedir) of files to skip.
    skip: predicate consulted by maybe_skip to mark individual cases skipped.
    Returns a decorator that mutates and returns the decorated test class.
    """
    if ignore_glob:
        ignore_glob = os.path.join(basedir, ignore_glob)

    def add_test_methods(test_class):
        ignored = set(glob.iglob(ignore_glob))

        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
            if filename in ignored:
                continue

            # Test names are derived from the file name plus a running counter.
            validating, _ = os.path.splitext(os.path.basename(filename))
            id = itertools.count(1)

            with open(filename) as test_file:
                for case in json.load(test_file):
                    for test in case["tests"]:
                        name = "test_%s_%s_%s" % (
                            validating,
                            next(id),
                            re.sub(r"[\W ]+", "_", test["description"]),
                        )
                        # Guard against two cases generating the same method name.
                        assert not hasattr(test_class, name), name

                        test_case = make_case(
                            data=test["data"],
                            schema=case["schema"],
                            valid=test["valid"],
                            name=name,
                        )
                        test_case = maybe_skip(skip, test_case, case, test)
                        setattr(test_class, name, test_case)

        return test_class
    return add_test_methods 
Example 24
Project: object-detection   Author: cristianpb   File: camera_pi.py    License: MIT License 5 votes vote down vote up
def ObjectTracking(self):
    """Capture frames from the Pi camera forever, track detections by centroid
    ID, and save an annotated jpg whenever a new "person" track appears.

    Saved frames go to IMAGE_FOLDER/pi/<YYYYMMDD>/<HHMMSS>_person_<ids>_.jpg.
    """
    detector = Detector()
    # Resume ID numbering after the highest ID parsed from previously saved
    # images (reduce_tracking folds the glob results into a dict keyed by ID).
    myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                        recursive=True)
    newdict = reduce(lambda a, b: reduce_tracking(a,b), myiter, dict())
    startID = max(map(int, newdict.keys()), default=0) + 1
    ct = CentroidTracker(startID=startID)
    with PiCamera() as camera:
        camera.resolution = (1280, 960)  # twice the target height and width
        camera.rotation = int(str(os.environ['CAMERA_ROTATION']))
        camera.framerate = 10
        with PiRGBArray(camera, size=(WIDTH, HEIGHT)) as output:
            while True:
                camera.capture(output, 'bgr', resize=(WIDTH, HEIGHT))
                img = output.array
                result = detector.prediction(img)
                df = detector.filter_prediction(result, img)
                img = detector.draw_boxes(img, df)
                boxes = df[['x1', 'y1', 'x2', 'y2']].values
                previous_object_ID = ct.nextObjectID
                objects = ct.update(boxes)
                # Save only when a person was detected AND a brand-new track ID
                # was assigned this frame (the old nextObjectID is now in use).
                if len(boxes) > 0 and (df['class_name'].str.contains('person').any()) and previous_object_ID in list(objects.keys()):
                    for (objectID, centroid) in objects.items():
                        # Annotate each tracked object with its ID and centroid.
                        text = "ID {}".format(objectID)
                        cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                        cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)

                    day = datetime.now().strftime("%Y%m%d")
                    directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    ids = "-".join(list([str(i) for i in objects.keys()]))
                    hour = datetime.now().strftime("%H%M%S")
                    filename_output = os.path.join(
                            directory, "{}_person_{}_.jpg".format(hour, ids)
                            )
                    cv2.imwrite(filename_output, img)
                time.sleep(0.300) 
Example 25
Project: object-detection   Author: cristianpb   File: app.py    License: MIT License 5 votes vote down vote up
def api_images():
    """Return a JSON page of image metadata filtered by date/time query params.

    Query params: page, page_size, date (dd/mm/YYYY), year, month, day, hour,
    minutes, detected_object. With no filters, all jpgs are paged through.
    """
    page = int(request.args.get('page', 0))
    page_size = int(request.args.get('page_size', 16))
    mydate = request.args.get('date', None)
    # The '?'/'*' defaults act as glob wildcards for unfiltered components.
    myyear = request.args.get('year', "????")
    mymonth = request.args.get('month', "??")
    myday = request.args.get('day', "??")
    myhour = request.args.get('hour', "??")
    myminutes = request.args.get('minutes', "??")
    mydetection = request.args.get('detected_object', "*")
    if mydate is not None:
        # An exact date takes precedence over the individual components.
        mydate = (datetime
                  .strptime(mydate, "%d/%m/%Y")
                  .strftime("%Y%m%d")
                  )
        myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', mydate, '*.jpg'),
                            recursive=True)
    elif (myyear != "????" or
          mymonth != "??" or
          myday != "??" or
          myhour != "??" or
          myminutes != "??" or
          mydetection != "*"):
        # Build a glob from whichever components were supplied.
        mypath = os.path.join(
                              IMAGE_FOLDER, '**',
                              f'{myyear}{mymonth}{myday}',
                              f'{myhour.zfill(2)}{myminutes}??*{mydetection}*.jpg')
        myiter = glob.iglob(mypath, recursive=True)
    else:
        myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                            recursive=True)

    # Lazily slice the iterator down to the requested page.
    start = page * page_size
    end = (page + 1) * page_size
    result = [get_data(i) for i in islice(myiter, start, end)]
    print('->> Start', start, 'end', end, 'len', len(result))
    return json.dumps(result) 
Example 26
Project: object-detection   Author: cristianpb   File: app.py    License: MIT License 5 votes vote down vote up
def list_folder():
    """Group all captured jpgs by the requested condition (default: 'year')."""
    condition = request.args.get('condition', 'year')
    image_iter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                            recursive=True)
    # Fold every image path into a dict using the grouping function
    # selected by `condition`.
    grouped = reduce(lambda acc, item: myconditions[condition](acc, item),
                     image_iter, dict())
    return json.dumps(grouped)
Example 27
Project: conan-center-index   Author: conan-io   File: conanfile.py    License: MIT License 5 votes vote down vote up
def package(self):
        """Install build results into the package folder and prune cruft."""
        self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
        env_build = self._configure_autotools()
        env_build.install()
        # Directories that must not ship in the final package.
        for junk_dir in ("share", os.path.join("lib", "pkgconfig")):
            tools.rmdir(os.path.join(self.package_folder, junk_dir))
        # libtool archives are useless to consumers — remove them all.
        la_pattern = os.path.join(self.package_folder, "lib", '*.la')
        for la_file in glob.iglob(la_pattern):
            os.remove(la_file)
Example 28
Project: core   Author: getavalon   File: test_jsonschema_test_suite.py    License: MIT License 5 votes vote down vote up
def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
    """Build a class decorator that adds one test method per JSON suite case.

    tests_glob: glob (relative to basedir) of JSON suite files to load.
    ignore_glob: optional glob (relative to basedir) of files to skip.
    skip: predicate consulted by maybe_skip to mark individual cases skipped.
    Returns a decorator that mutates and returns the decorated test class.
    """
    if ignore_glob:
        ignore_glob = os.path.join(basedir, ignore_glob)

    def add_test_methods(test_class):
        ignored = set(glob.iglob(ignore_glob))

        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
            if filename in ignored:
                continue

            # Test names are derived from the file name plus a running counter.
            validating, _ = os.path.splitext(os.path.basename(filename))
            id = itertools.count(1)

            with open(filename) as test_file:
                for case in json.load(test_file):
                    for test in case["tests"]:
                        name = "test_%s_%s_%s" % (
                            validating,
                            next(id),
                            re.sub(r"[\W ]+", "_", test["description"]),
                        )
                        # Guard against two cases generating the same method name.
                        assert not hasattr(test_class, name), name

                        test_case = make_case(
                            data=test["data"],
                            schema=case["schema"],
                            valid=test["valid"],
                            name=name,
                        )
                        test_case = maybe_skip(skip, test_case, case, test)
                        setattr(test_class, name, test_case)

        return test_class
    return add_test_methods 
Example 29
Project: cassandra-migrate   Author: Cobliteam   File: migration.py    License: MIT License 5 votes vote down vote up
def glob_all(cls, base_path, *patterns):
        """Load all paths matching a glob as migrations in sorted order"""
        matched = []
        for pattern in patterns:
            matched += glob.iglob(os.path.join(base_path, pattern))

        # Sort first, then load each path as a migration.
        return [cls.load(p) for p in cls.sort_paths(matched)]
Example 30
Project: linter-pylama   Author: AtomLinter   File: main.py    License: MIT License 5 votes vote down vote up
def run(self):
        """Run isort in check-only mode over every distributed .py file.

        Exits with status 1 if any file is incorrectly sorted.
        """
        check_args = self.arguments
        check_args['check'] = True
        found_wrong = False
        for directory in self.distribution_files():
            for source_file in glob.iglob(os.path.join(directory, '*.py')):
                try:
                    if SortImports(source_file, **check_args).incorrectly_sorted:
                        found_wrong = True
                except IOError as e:
                    print("WARNING: Unable to parse file {0} due to {1}".format(source_file, e))
        if found_wrong:
            exit(1)