Python glob.iglob() Examples

The following are 30 code examples of glob.iglob(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module glob, or try the search function.
Example #1
Source File: procmon.py    From Paradrop with Apache License 2.0 7 votes vote down vote up
def check(self):
        """
        Verify that the monitored service is running and consistent with
        its pid file(s).

        Returns True or False.
        """
        # pids (as strings) of running processes whose name contains the
        # command string we are monitoring.
        matching_pids = set()

        # pids that are both running and recorded in a pid file.
        verified_pids = set()

        # Scan the process table for matching command names.
        for process in psutil.process_iter():
            try:
                if self.cmdstring in process.name():
                    matching_pids.add(str(process.pid))

            # psutil.ZombieProcess / psutil.AccessDenied may also be raised;
            # those are deliberately left unhandled so they get logged.
            except psutil.NoSuchProcess:
                pass

        # Cross-check each pid file against the set of running processes.
        for pattern in self.pidfiles:
            for pid_path in glob.iglob(pattern):
                with open(pid_path, 'r') as pid_file:
                    recorded_pid = pid_file.read().strip()

                if recorded_pid in matching_pids:
                    verified_pids.add(recorded_pid)
                else:
                    # Stale pid file: no live process backs it, remove it.
                    os.remove(pid_path)

        return bool(verified_pids)
Example #2
Source File: mq.py    From AUCR with GNU General Public License v3.0 6 votes vote down vote up
def get_mq_yaml_configs():
    """MQ aucr yaml config file from each plugin."""
    tasks, reports, analysis = [], [], []
    # Every plugin may ship an mqtasks.yml; gather the sections of each one.
    for config_path in glob.iglob('aucr_app/plugins/**/mqtasks.yml', recursive=True):
        parsed = YamlInfo(config_path, "none", "none").get()
        if "tasks" in parsed:
            tasks.append(parsed["tasks"])
        if "reports" in parsed:
            reports.append(parsed["reports"])
        if "analysis" in parsed:
            analysis.append(parsed["analysis"])
    return {"tasks": tasks, "reports": reports, "analysis": analysis}
Example #3
Source File: imdb.py    From decaNLP with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, path, text_field, label_field, **kwargs):
        """Create an IMDB dataset instance given a path and fields.

        Arguments:
            path: Path to the dataset's highest level directory
            text_field: The field that will be used for text data.
            label_field: The field that will be used for label data.
            Remaining keyword arguments: Passed to the constructor of
                data.Dataset.
        """
        fields = [('text', text_field), ('label', label_field)]
        examples = []

        # Each sentiment class lives in its own sub-directory of *.txt files;
        # only the first line of every file (the review text) is used.
        for sentiment in ['pos', 'neg']:
            pattern = os.path.join(path, sentiment, '*.txt')
            for review_path in glob.iglob(pattern):
                with open(review_path, 'r') as review_file:
                    review = review_file.readline()
                examples.append(data.Example.fromlist([review, sentiment], fields))

        super(IMDb, self).__init__(examples, fields, **kwargs)
Example #4
Source File: generic.py    From decaNLP with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, path, field, subsample=None, **kwargs):
        # Build an IMDB-style sentiment dataset as (context, question, answer)
        # examples for decaNLP.
        #
        # path: directory holding 'pos' and 'neg' sub-directories of *.txt
        #     reviews (only the first line of each file is read).
        # field: the Field applied to every column named in self.fields.
        # subsample: optional cap on the number of examples.
        #     NOTE(review): the `break` below only exits the inner per-label
        #     loop, so slightly more than `subsample` examples can be
        #     collected across both labels — confirm this is intended.
        fields = [(x, field) for x in self.fields]
        examples = []
        labels = {'neg': 'negative', 'pos': 'positive'}
        question = 'Is this review negative or positive?'

        # Examples are cached per (dataset path, subsample) so the thousands
        # of small review files are only read once.
        cache_name = os.path.join(os.path.dirname(path), '.cache', os.path.basename(path), str(subsample))
        if os.path.exists(cache_name):
            print(f'Loading cached data from {cache_name}')
            examples = torch.load(cache_name)
        else:
            for label in ['pos', 'neg']:
                for fname in glob.iglob(os.path.join(path, label, '*.txt')):
                    with open(fname, 'r') as f:
                        context = f.readline()
                    answer = labels[label]
                    context_question = get_context_question(context, question) 
                    examples.append(data.Example.fromlist([context, question, answer, CONTEXT_SPECIAL, QUESTION_SPECIAL, context_question], fields))
                    if subsample is not None and len(examples) > subsample:
                        break
            os.makedirs(os.path.dirname(cache_name), exist_ok=True)
            print(f'Caching data to {cache_name}')
            torch.save(examples, cache_name)
        super(imdb.IMDb, self).__init__(examples, fields, **kwargs)
Example #5
Source File: consolidate.py    From pyDcop with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def distribution_cost(
    dcop_files: List[str], distribution_file, algo, target
):
    """Compute distribution costs and append them to a CSV target.

    Args:
        dcop_files: dcop definition file(s); the first name is written in
            each CSV row to identify the dcop.
        distribution_file: path or glob pattern matching one or several
            distribution files to evaluate.
        algo: algorithm used when computing the costs.
        target: writable file-like object receiving CSV rows of
            [dcop file, distribution file, cost, hosting, communication].

    Returns:
        The (mutated) target object.
    """
    logger.debug(f"analyse file {dcop_files}")

    dcop = load_dcop_from_file(dcop_files)
    # The distribution argument may be a glob pattern: evaluate every match.
    path_glob = os.path.abspath(os.path.expanduser(distribution_file))
    distribution_files = sorted(glob.iglob(path_glob))
    for distribution_file in distribution_files:

        try:
            cost, comm, hosting = single_distrib_costs(
                dcop, distribution_file, algo
            )

            csv_writer = csv.writer(target)
            csv_writer.writerow([dcop_files[0], distribution_file, cost, hosting, comm])
        except Exception:
            # Best-effort: skip distributions that cannot be evaluated, but
            # log them instead of silently swallowing every error (the
            # previous bare `except:` even hid KeyboardInterrupt/SystemExit).
            logger.exception(
                "Could not compute costs for distribution %s", distribution_file
            )
    return target
Example #6
Source File: translation.py    From decaNLP with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def clean(path):
        """Strip IWSLT markup from the files in *path*, writing plain text.

        Each ``*.xml`` file becomes a same-named file without extension
        containing one segment per line; each ``train.tags*`` file becomes
        a ``train.*`` file with metadata lines removed.
        """
        for xml_path in glob.iglob(os.path.join(path, '*.xml')):
            print(xml_path)
            txt_path = os.path.splitext(xml_path)[0]
            with io.open(txt_path, mode='w', encoding='utf-8') as txt_out:
                # getroot()[0]: the corpus set element wrapping the docs.
                corpus = ET.parse(xml_path).getroot()[0]
                for document in corpus.findall('doc'):
                    for segment in document.findall('seg'):
                        txt_out.write(segment.text.strip() + '\n')

        # train.tags* files are plain text with interleaved metadata lines;
        # drop every line carrying one of these tags.
        xml_tags = ['<url', '<keywords', '<talkid', '<description',
                    '<reviewer', '<translator', '<title', '<speaker']
        for tagged_path in glob.iglob(os.path.join(path, 'train.tags*')):
            print(tagged_path)
            txt_path = tagged_path.replace('.tags', '')
            with io.open(txt_path, mode='w', encoding='utf-8') as txt_out, \
                    io.open(tagged_path, mode='r', encoding='utf-8') as tagged_in:
                for line in tagged_in:
                    if not any(tag in line for tag in xml_tags):
                        txt_out.write(line.strip() + '\n')
Example #7
Source File: fileset.py    From typhon with MIT License 6 votes vote down vote up
def _get_matching_files(self, path, regex, start, end,):
        """Yield files in *path* that satisfy all search conditions.

        Args:
            path: Directory containing the candidate files.
            regex: Compiled regular expression the filename must match.
            start: Datetime marking the start of the time interval.
            end: Datetime marking the end of the time interval; the file's
                time coverage must overlap [start, end].

        Yields:
            A FileInfo object with the file path and time coverage.
        """

        for candidate in glob.iglob(os.path.join(path, "*")):
            if not regex.match(candidate):
                continue
            file_info = self.get_info(candidate)

            # Keep only files whose coverage overlaps the requested
            # interval and which are not excluded from this fileset.
            overlaps = IntervalTree.interval_overlaps(
                file_info.times, (start, end))
            if overlaps and not self.is_excluded(file_info):
                yield file_info
Example #8
Source File: schema_diff.py    From ec2-api with Apache License 2.0 6 votes vote down vote up
def _migrate_get_earliest_version():
    """Return the lowest migration version number found in MIGRATE_REPO.

    Scans the sqlalchemy-migrate ``versions`` directory for files named
    like ``123_description.py`` and returns the smallest numeric prefix.

    Raises:
        IndexError: if no versioned migration files are found.
    """
    versions_glob = os.path.join(MIGRATE_REPO, 'versions', '???_*.py')

    versions = []
    for path in glob.iglob(versions_glob):
        filename = os.path.basename(path)
        prefix = filename.split('_', 1)[0]
        try:
            version = int(prefix)
        except ValueError:
            # Bug fix: the append used to run even after a failed conversion,
            # re-appending the previous file's version (or raising NameError
            # on the very first file). Skip non-numeric prefixes entirely.
            continue
        versions.append(version)

    versions.sort()
    return versions[0]


### Git 
Example #9
Source File: dataset.py    From seamseg with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def __init__(self, in_dir, transform):
        super(ISSTestDataset, self).__init__()
        self.in_dir = in_dir
        self.transform = transform

        # Recursively collect every image with a supported extension,
        # recording its id (filename stem), path and size.
        self._images = []
        patterns = (path.join(self.in_dir, '**', ext)
                    for ext in ISSTestDataset._EXTENSIONS)
        for img_path in chain(*(glob.iglob(p, recursive=True) for p in patterns)):
            _, file_name = path.split(img_path)
            img_id, _ = path.splitext(file_name)

            # Open just long enough to read dimensions; PIL reports
            # (width, height), we store (height, width).
            with Image.open(img_path) as img_raw:
                size = (img_raw.size[1], img_raw.size[0])

            self._images.append({
                "idx": img_id,
                "path": img_path,
                "size": size,
            })
Example #10
Source File: __init__.py    From jbox with MIT License 6 votes vote down vote up
def convert(installers, dest_dir, verbose):
    """Convert egg and bdist_wininst installers matching the given patterns
    into wheels written to *dest_dir*."""
    require_pkgresources('wheel convert')

    # Only support wheel convert if pkg_resources is present
    from ..wininst2wheel import bdist_wininst2wheel
    from ..egg2wheel import egg2wheel

    # Each pattern may expand to several installers; pick the converter
    # by file extension.
    for pattern in installers:
        for installer_path in iglob(pattern):
            converter = (egg2wheel
                         if os.path.splitext(installer_path)[1] == '.egg'
                         else bdist_wininst2wheel)
            if verbose:
                sys.stdout.write("{0}... ".format(installer_path))
                sys.stdout.flush()
            converter(installer_path, dest_dir)
            if verbose:
                sys.stdout.write("OK\n")
Example #11
Source File: helloworld.py    From traces with MIT License 6 votes vote down vote up
def read_all(pattern='data/lightbulb-*.csv'):
    """Read all of the CSVs in a directory matching the filename pattern
    as TimeSeries.

    """
    series = []
    for csv_path in glob.iglob(pattern):
        print('reading', csv_path, file=sys.stderr)
        # Column 0 holds ISO timestamps, column 1 holds integer values;
        # missing values default to 0.
        ts = traces.TimeSeries.from_csv(
            csv_path,
            time_column=0,
            time_transform=parse_iso_datetime,
            value_column=1,
            value_transform=int,
            default=0,
        )
        # compact() presumably merges consecutive duplicate values — see
        # the traces library documentation.
        ts.compact()
        series.append(ts)
    return series
Example #12
Source File: wav2vec_manifest.py    From fairseq with MIT License 6 votes vote down vote up
def main(args):
    """Write train.tsv / valid.tsv wav2vec manifests for the audio files
    found recursively under args.root, splitting by args.valid_percent."""
    assert args.valid_percent >= 0 and args.valid_percent <= 1.

    dir_path = os.path.realpath(args.root)
    search_path = os.path.join(dir_path, '**/*.' + args.ext)
    rand = random.Random(args.seed)

    train_manifest = os.path.join(args.dest, 'train.tsv')
    valid_manifest = os.path.join(args.dest, 'valid.tsv')
    with open(train_manifest, 'w') as train_f, open(valid_manifest, 'w') as valid_f:
        # The first manifest line is the common root directory.
        print(dir_path, file=train_f)
        print(dir_path, file=valid_f)

        for fname in glob.iglob(search_path, recursive=True):
            file_path = os.path.realpath(fname)

            if args.path_must_contain and args.path_must_contain not in file_path:
                continue

            # Manifest entry: path relative to root + number of audio frames.
            frames = soundfile.info(fname).frames
            sink = valid_f if rand.random() <= args.valid_percent else train_f
            print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=sink)
Example #13
Source File: utils.py    From AUCR with GNU General Public License v3.0 6 votes vote down vote up
def get_group_permission_navbar():
    """Return group nav list from database.

    Builds a {"main": [...]} navbar dict from the navbar.yml files shipped
    by every plugin, filtered by the groups the current user belongs to.
    Returns an empty dict when no navbar items apply.
    """
    # current_user comes from the web framework's login layer; an anonymous
    # user is presumably falsy here and falls back to user id 1 — TODO confirm.
    if current_user:
        current_user_id = current_user.id
    else:
        current_user_id = 1
    # NOTE(review): user_groups_ids is a module-level dict mutated on every
    # call — not safe under concurrent requests; verify this is intentional.
    user_groups_ids["items"] = Group.query.filter_by(username_id=current_user_id).all()
    user_groups_links = {}
    main_list = []
    for items in user_groups_ids["items"]:
        group_object = Groups.query.filter_by(id=items.groups_id).first()
        # Each plugin may ship a navbar.yml; accumulate the "main" entries
        # the user's group is allowed to see.
        for filename in glob.iglob('aucr_app/plugins/**/navbar.yml', recursive=True):
            menu_links = YamlInfo(filename, "none", "none")
            run = menu_links.get()
            main_result_list = generate_navbar_list_item("main", run, group_object.name, main_list)
            if main_result_list:
                main_list = main_result_list
    # Used to make sure empty lists are not populating the navbar dictionary
    if main_list:
        user_groups_links["main"] = main_list
    return user_groups_links
Example #14
Source File: ncm2_core.py    From ncm2 with MIT License 6 votes vote down vote up
def load_plugin(self, _, rtp: str):
        """Scan every runtimepath entry for ncm2 plugin files and load any
        that have not been loaded yet."""
        self.update_rtp(rtp)

        for rtp_dir in rtp.split(','):
            # Vimscript plugins are sourced through an ncm2 helper notify.
            for vim_file in glob.iglob(path.join(rtp_dir, 'ncm2-plugin/*.vim')):
                if vim_file not in self._loaded_plugins:
                    self._loaded_plugins[vim_file] = True
                    logger.info('send vimscript plugin %s', vim_file)
                    self.notify('ncm2#_load_vimscript', vim_file)

            for py_file in glob.iglob(path.join(rtp_dir, 'ncm2-plugin/*.py')):
                if py_file in self._loaded_plugins:
                    continue
                self._loaded_plugins[py_file] = True
                logger.info('send python plugin %s', py_file)
                # async_call to get multiple exceptions properly printed
                self.nvim.async_call(partial(self.load_python, _, py_file))

            # Subscope detectors from both pythonx and python3 directories
            # are collected and loaded in a single batch.
            detectors = glob.glob(path.join(rtp_dir, 'pythonx/ncm2_subscope_detector/*.py')) + \
                glob.glob(path.join(rtp_dir, 'python3/ncm2_subscope_detector/*.py'))
            self.load_subscope_detectors(detectors)

        self.notify('ncm2#_au_plugin')
Example #15
Source File: utils.py    From zun with Apache License 2.0 6 votes vote down vote up
def get_vf_num_by_pci_address(pci_addr):
    """Get the VF number based on a VF's pci address

    A VF is associated with an VF number, which ip link command uses to
    configure it. This number can be obtained from the PCI device filesystem.

    :param pci_addr: PCI address of the VF, e.g. "0000:04:10.7".
    :returns: the VF number as a string of digits.
    :raises exception.PciDeviceNotFoundById: if no virtfn link under the
        physical function resolves to this PCI address.
    """
    VIRTFN_RE = re.compile(r"virtfn(\d+)")
    virtfns_path = "/sys/bus/pci/devices/%s/physfn/virtfn*" % (pci_addr)
    vf_num = None
    try:
        for vf_path in glob.iglob(virtfns_path):
            # Bug fix: escape the address before using it as a regex — the
            # '.' in a PCI address (e.g. "...10.7") would otherwise match
            # any character and could hit the wrong virtfn link.
            if re.search(re.escape(pci_addr), os.readlink(vf_path)):
                t = VIRTFN_RE.search(vf_path)
                vf_num = t.group(1)
                break
    except Exception:
        # Best-effort sysfs scan: any failure is reported as "not found".
        pass
    if vf_num is None:
        raise exception.PciDeviceNotFoundById(id=pci_addr)
    return vf_num
Example #16
Source File: __init__.py    From auto-alt-text-lambda-api with MIT License 6 votes vote down vote up
def convert(installers, dest_dir, verbose):
    """Convert the installers matching each pattern into wheels in *dest_dir*."""
    require_pkgresources('wheel convert')

    # Only support wheel convert if pkg_resources is present
    from ..wininst2wheel import bdist_wininst2wheel
    from ..egg2wheel import egg2wheel

    for pattern in installers:
        for installer_path in iglob(pattern):
            # .egg archives use the egg converter; anything else is treated
            # as a bdist_wininst installer.
            if os.path.splitext(installer_path)[1] == '.egg':
                converter = egg2wheel
            else:
                converter = bdist_wininst2wheel
            if verbose:
                sys.stdout.write("{0}... ".format(installer_path))
                sys.stdout.flush()
            converter(installer_path, dest_dir)
            if verbose:
                sys.stdout.write("OK\n")
Example #17
Source File: batch.py    From pyDcop with BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def input_files_glob(path_glob: str) -> List[str]:
    """
    Find files matching a glob expression.

    Parameters
    ----------
    path_glob: str
        unix style glob expression (e.g. '/home/user/foo/bar*.json')

    Returns
    -------
    List[str]
        The paths of all matching files (empty when nothing matches).
    """
    # Expand '~' and make the pattern absolute before globbing.
    expanded = os.path.abspath(os.path.expanduser(path_glob))
    logger.debug("Looking for files in %s", expanded)
    return list(glob.iglob(expanded))
Example #18
Source File: utils.py    From How_to_generate_music_in_tensorflow_LIVE with Apache License 2.0 6 votes vote down vote up
def convert_midi2mp3():
    """ Convert all midi files of the given directory to mp3
    """
    input_dir = 'docs/midi/'
    output_dir = 'docs/mp3/'

    assert os.path.exists(input_dir)
    os.makedirs(output_dir, exist_ok=True)

    print('Converting:')
    converted = 0
    for midi_path in glob.iglob(os.path.join(input_dir, '**/*.mid'), recursive=True):
        print(midi_path)
        mp3_name = os.path.splitext(os.path.basename(midi_path))[0] + '.mp3'
        mp3_path = os.path.join(output_dir, mp3_name)
        # Pipe timidity's raw output straight into ffmpeg for mp3 encoding.
        # NOTE: shell pipeline over local project files, not untrusted input.
        command = 'timidity {} -Ow -o - | ffmpeg -i - -acodec libmp3lame -ab 64k {}'.format(midi_path, mp3_path)  # TODO: Redirect stdout to avoid polluting the screen (have cleaner printing)
        subprocess.call(command, shell=True)
        converted += 1
    print('{} files converted.'.format(converted))
Example #19
Source File: egg2wheel.py    From jbox with MIT License 5 votes vote down vote up
def main():
    """Command-line entry point: convert eggs matching the given patterns."""
    parser = ArgumentParser()
    parser.add_argument('eggs', nargs='*', help="Eggs to convert")
    parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
            help="Directory to store wheels (default %(default)s)")
    parser.add_argument('--verbose', '-v', action='store_true')
    args = parser.parse_args()
    # Expand each positional pattern and convert every matching egg.
    for pattern in args.eggs:
        for egg_path in iglob(pattern):
            if args.verbose:
                sys.stdout.write("{0}... ".format(egg_path))
            egg2wheel(egg_path, args.dest_dir)
            if args.verbose:
                sys.stdout.write("OK\n")
Example #20
Source File: scons-configure-cache.py    From arnold-usd with Apache License 2.0 5 votes vote down vote up
def rearrange_cache_entries(current_prefix_len, new_prefix_len):
    '''Move cache files if prefix length changed.

    Move the existing cache files to new directories of the
    appropriate name length and clean up the old directories.
    '''
    print('Changing prefix length from', current_prefix_len,
          'to', new_prefix_len)
    created_dirs = set()
    obsolete_dirs = set()
    # Cache entries live one level deep: <prefix-dir>/<entry-name>.
    for cache_file in glob.iglob(os.path.join('*', '*')):
        entry = os.path.basename(cache_file)
        old_dir = entry[:current_prefix_len].upper()
        if old_dir not in obsolete_dirs:
            print('Migrating', old_dir)
            obsolete_dirs.add(old_dir)
        new_dir = entry[:new_prefix_len].upper()
        if new_dir not in created_dirs:
            os.mkdir(new_dir)
            created_dirs.add(new_dir)
        os.rename(cache_file, os.path.join(new_dir, entry))

    # Now delete the original directories
    for old_dir in obsolete_dirs:
        os.rmdir(old_dir)


# The configuration dictionary should have one entry per entry in the
# cache config. The value of each entry should include the following:
#   implicit - (optional) This is to allow adding a new config entry and also
#              changing the behaviour of the system at the same time. This
#              indicates the value the config entry would have had if it had
#              been specified.
#   default - The value the config entry should have if it wasn't previously
#             specified
#   command-line - parameters to pass to ArgumentParser.add_argument
#   converter - (optional) Function to call if conversion is required
#               if this configuration entry changes 
Example #21
Source File: test_glob.py    From ironpython2 with Apache License 2.0 5 votes vote down vote up
def glob(self, *parts):
        # Test helper: glob *parts* (joined into one pattern) relative to
        # self.tempdir and check that glob.glob and glob.iglob agree, for
        # both the str pattern and its decoded (unicode) form.
        # Returns the str results so callers can make further assertions.
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        res = glob.glob(p)
        # iglob must yield exactly the same files as glob (order-insensitive).
        self.assertItemsEqual(glob.iglob(p), res)
        # The decoded pattern must produce the decoded version of the same
        # results, via both glob and iglob.
        ures = [fsdecode(x) for x in res]
        self.assertItemsEqual(glob.glob(fsdecode(p)), ures)
        self.assertItemsEqual(glob.iglob(fsdecode(p)), ures)
        return res
Example #22
Source File: egg2wheel.py    From auto-alt-text-lambda-api with MIT License 5 votes vote down vote up
def main():
    """CLI entry point: expand egg patterns and convert each to a wheel."""
    parser = ArgumentParser()
    parser.add_argument('eggs', nargs='*', help="Eggs to convert")
    parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
            help="Directory to store wheels (default %(default)s)")
    parser.add_argument('--verbose', '-v', action='store_true')
    args = parser.parse_args()
    for egg_pattern in args.eggs:
        for egg_file in iglob(egg_pattern):
            if args.verbose:
                sys.stdout.write("{0}... ".format(egg_file))
            egg2wheel(egg_file, args.dest_dir)
            if args.verbose:
                sys.stdout.write("OK\n")
Example #23
Source File: distribution_daemon.py    From rucio with Apache License 2.0 5 votes vote down vote up
def rename_files(tdir, pattern, new_name):
    """
    Renames the files in the dataset according to the RSE
    on which the dataset is being replicated.
    """
    # Files matching *pattern* under *tdir* become <new_name><n>.rnd,
    # numbered in glob order; existing targets are left untouched.
    for index, src in enumerate(glob.iglob(os.path.join(tdir, pattern))):
        dst = os.path.join(tdir, new_name + str(index) + '.rnd')
        logging.info(src)
        logging.info(new_name + str(index) + '.rnd')
        if not os.path.isfile(dst):
            logging.info("renaming..")
            os.rename(src, dst)
Example #24
Source File: wininst2wheel.py    From jbox with MIT License 5 votes vote down vote up
def main():
    """CLI entry point: convert bdist_wininst installers into wheels."""
    parser = ArgumentParser()
    parser.add_argument('installers', nargs='*', help="Installers to convert")
    parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
            help="Directory to store wheels (default %(default)s)")
    parser.add_argument('--verbose', '-v', action='store_true')
    args = parser.parse_args()
    # Every positional argument is a glob pattern of installer files.
    for pattern in args.installers:
        for installer_path in iglob(pattern):
            if args.verbose:
                sys.stdout.write("{0}... ".format(installer_path))
            bdist_wininst2wheel(installer_path, args.dest_dir)
            if args.verbose:
                sys.stdout.write("OK\n")
Example #25
Source File: app_svc.py    From caldera with Apache License 2.0 5 votes vote down vote up
async def register_contacts(self):
        """Discover and register every contact implementation in app/contacts.

        Each *.py file there must expose a ``Contact`` class; the module path
        is derived from the file path (path separators -> dots, '.py' dropped).

        Bug fix: the body uses ``await``, which is a SyntaxError inside a
        plain ``def`` — the function must be declared ``async def``.
        """
        contact_svc = self.get_service('contact_svc')
        for contact_file in glob.iglob('app/contacts/*.py'):
            contact_module_name = contact_file.replace('/', '.').replace('\\', '.').replace('.py', '')
            contact_class = import_module(contact_module_name).Contact
            await contact_svc.register(contact_class(self.get_services()))
Example #26
Source File: util.py    From jbox with MIT License 5 votes vote down vote up
def _iglob(path_glob):
    """Yield paths matching *path_glob*, a rich glob expression.

    Extends the standard iglob with two features:
    - brace sets: ``foo.{py,txt}`` expands to ``foo.py`` and ``foo.txt``;
    - ``**``: recursive descent below the prefix directory.
    """
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # Exactly one {a,b,...} set was split out: prefix, set body, suffix.
        assert len(rich_path_glob) == 3, rich_path_glob
        # Fix: local was named `set`, shadowing the builtin.
        prefix, alternatives, suffix = rich_path_glob
        for item in alternatives.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '**/tail' and '**\\tail'
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            # Walk everything below the prefix and re-glob the tail at each
            # level.  (Locals renamed from `dir`/`files`: `dir` shadowed the
            # builtin and neither was used.)
            for walk_root, _dirs, _files in os.walk(prefix):
                walk_root = os.path.normpath(walk_root)
                for fn in _iglob(os.path.join(walk_root, radical)):
                    yield fn



#
# HTTPSConnection which verifies certificates/matches domains
# 
Example #27
Source File: apk_analyzer.py    From apk_api_key_extractor with Apache License 2.0 5 votes vote down vote up
def extract_native_strings(decoded_apk_folder):
    """
    Extract the strings contained in the native libraries found in a decoded apk

    :param decoded_apk_folder: folder that contains the decoded apk
    :return: the strings found in the native libraries
    :rtype: set of LibString
    """
    lib_strings = set()
    lib_path = os.path.join(decoded_apk_folder, "lib")
    if os.path.exists(lib_path):
        path_to_be_inspected = None
        # Only a single ABI directory is inspected: the first one present in
        # this priority order (32-bit ARM first).
        arc_priority_list = ["armeabi", "armeabi-v7a", "arm64-v8a", "x86", "x86_64", "mips", "mips64"]
        for arc in arc_priority_list:
            if os.path.exists(os.path.join(lib_path, arc)):
                path_to_be_inspected = os.path.join(lib_path, arc)
                break
        if path_to_be_inspected:
            # Recursively visit every shared object under the chosen ABI dir.
            for filename in glob.iglob(path_to_be_inspected + '/**/*.so', recursive=True):
                logging.debug("Found shared object lib: {0}".format(filename))
                base_filename = os.path.basename(filename)
                if base_filename in lib_blacklist:
                    # if the library is a generic one, we can safely ignore it
                    # since it would probably not contain any interesting information
                    continue
                try:
                    # strings() presumably scans the ELF sections named in
                    # config.shared_object_sections for printable runs of at
                    # least 4 characters — TODO confirm against its definition.
                    for string in strings(filename, config.shared_object_sections, 4):
                        lib_string = LibString(base_filename, string)
                        # Only keep strings that pass the pre-filter heuristics.
                        if s_filter.pre_filter_mystring(lib_string):
                            lib_strings.add(lib_string)
                except (ELFError, ValueError) as e:
                    # Malformed ELF files are logged and skipped.
                    logging.error(str(e))
    return lib_strings
Example #28
Source File: evaluate_analyzer.py    From turkish-morphology with Apache License 2.0 5 votes vote down vote up
def _read_tokens(treebank_dir: str) -> List[str]:
  """Reads tokens from CoNLL data and returns them in a list."""

  def _extract_tokens_from(line: str) -> Generator[str, None, None]:
    """Extracts token from a CoNLL data file line."""
    if line.isspace():  # Empty lines are sentence seperators.
      return

    column = line.split()

    if not len(column) >= 2:
      raise EvaluationError(
          f"Illformed line in source CoNLL data, only {len(column)}"
          f" whitespace separated columns found but word form is expected"
          f" to be on the second column.")

    token = column[1]

    if token != "_":  # It's an inflectional group, not a word form.
      yield from token.split("_")

  def _read_tokens_from(path: str) -> Generator[str, None, None]:
    """Reads tokens from CoNLL data file that lives in the path."""
    logging.info(f"Reading tokens from '{path}'")

    with open(path, "r", encoding="utf-8") as reader:
      line_tokens = (_extract_tokens_from(l) for l in reader)
      yield from itertools.chain.from_iterable(line_tokens)

  paths = glob.iglob(f"{treebank_dir}/*.conll")
  file_tokens = (_read_tokens_from(p) for p in paths)
  tokens = list(itertools.chain.from_iterable(file_tokens))

  if not tokens:
    raise EvaluationError(
        f"No tokens found in treebank files that are under '{treebank_dir}'.")

  return tokens 
Example #29
Source File: learning_svc.py    From caldera with Apache License 2.0 5 votes vote down vote up
def add_parsers(directory):
        """Instantiate a Parser from every top-level .py file in *directory*."""
        parsers = []
        for filepath in glob.iglob('%s/**.py' % directory):
            # Turn the file path into a dotted module path before importing.
            dotted = filepath.replace('/', '.').replace('\\', '.').replace('.py', '')
            parsers.append(import_module(dotted).Parser())
        return parsers
Example #30
Source File: data_svc.py    From caldera with Apache License 2.0 5 votes vote down vote up
async def _load_sources(self, plugin):
        """Load every YAML source file in the plugin's data directory.

        Bug fix: the body uses ``await``, which is a SyntaxError inside a
        plain ``def`` — the function must be declared ``async def``.
        """
        for filename in glob.iglob('%s/sources/*.yml' % plugin.data_dir, recursive=False):
            await self.load_source_file(filename, plugin.access)