Python os.path.basename() Examples

The following are code examples showing how to use os.path.basename(). They are drawn from open source Python projects.
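
Before the project-by-project examples, here is a minimal sketch of what the function returns for a few made-up paths:

from os.path import basename

print(basename('/usr/local/bin/python3'))  # 'python3'
print(basename('archive.tar.gz'))          # 'archive.tar.gz' (no directory part)
print(basename('/var/log/'))               # '' (a trailing slash leaves an empty base name)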

Example 1
Project: Manga-colorization---cycle-gan   Author: OValery16   File: get_data.py    Mozilla Public License 2.0
def _download_data(self, dataset_url, save_path):
        if not isdir(save_path):
            os.makedirs(save_path)

        base = basename(dataset_url)
        temp_save_path = join(save_path, base)

        with open(temp_save_path, "wb") as f:
            r = requests.get(dataset_url)
            f.write(r.content)

        if base.endswith('.tar.gz'):
            obj = tarfile.open(temp_save_path)
        elif base.endswith('.zip'):
            obj = ZipFile(temp_save_path, 'r')
        else:
            raise ValueError("Unknown File Type: {0}.".format(base))

        self._print("Unpacking Data...")
        obj.extractall(save_path)
        obj.close()
        os.remove(temp_save_path) 
Example 2
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def init_libraries(self, paired_end, max_insert_size, ctr_libs,
                       exp_libs):
        self._paired_end = paired_end
        self._max_insert_size = max_insert_size
        self._ctr_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in ctr_libs]
        self._exp_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in exp_libs]
        # add libs to lib_dict
        for lib_file in exp_libs + ctr_libs:
            if not isfile(lib_file):
                sys.stderr.write("ERROR: The library file {} does not exist.\n"
                                 .format(lib_file))
                sys.exit(1)
            self._lib_dict[splitext(basename(lib_file))[0]] = Library(
                paired_end, max_insert_size, lib_file,
                deepcopy(self._replicon_dict))
        self._lib_names_list = list(self._lib_dict.keys())
        print("The following libraries were initialized:\n"
              "# Experiment libraries\n{0}\n"
              "# Control libraries\n{1}".format(
                  '\n'.join(self._exp_lib_list),
                  '\n'.join(self._ctr_lib_list))) 
Example 3
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License
def init_libraries(self, paired_end, max_insert_size, ctr_libs,
                       exp_libs):
        self._paired_end = paired_end
        self._max_insert_size = max_insert_size
        self._ctr_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in ctr_libs]
        self._exp_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in exp_libs]
        # add libs to lib_dict
        for lib_file in exp_libs + ctr_libs:
            if not isfile(lib_file):
                sys.stderr.write(
                    "ERROR: The library file {} does not exist.\n".format(
                        lib_file))
                sys.exit(1)
            self._lib_dict[splitext(basename(lib_file))[0]] = Library(
                paired_end, max_insert_size, lib_file,
                deepcopy(self._replicon_dict))
        self._lib_names_list = list(self._lib_dict.keys())
        print("The following libraries were initialized:\n"
              "# Experiment libraries\n{0}\n"
              "# Control libraries\n{1}".format(
                  '\n'.join(self._exp_lib_list),
                  '\n'.join(self._ctr_lib_list))) 
Example 4
Project: incubator-spot   Author: apache   File: file_watcher.py    Apache License 2.0
def detect(self, newfile):
        '''
            Called when a new file is generated under the monitoring directory.

        :param newfile: Path to file created recently.
        '''
        self._logger.info(' -------- New File Detected! -------- ')

        filename = basename(newfile)
        # check whether the filename is in the supported list
        if any([x.search(filename) for x in self._regexs]) or not self._regexs:
            self._queue.insert(0, newfile)
            self._logger.info('File "{0}" added to the queue.'.format(newfile))
            return

        self._logger.warning('Filename "%s" is not supported! Skip file...' % filename) 
Example 5
Project: incubator-spot   Author: apache   File: file_watcher.py    Apache License 2.0
def stop(self):
        '''
            Signals the current thread to stop and waits until it terminates. This blocks
        the calling thread until it terminates -- either normally or through an unhandled
        exception.

        :raises RuntimeError: If an attempt is made to join the current thread as that
                              would cause a deadlock. It is also an error to join() a
                              thread before it has been started and attempts to do so
                              raises the same exception.
        '''
        self._logger.info('Signal {0} thread to stop normally.'.format(str(self)))
        super(FileWatcher, self).stop()

        self._logger.info('Wait until the {0} thread terminates...'.format(str(self)))
        super(FileWatcher, self).join()

        while not self.is_empty:
            self._logger.debug('Drop "%s" from the queue.' % basename(self._queue.pop()))

        assert self.is_empty, 'Failed to clean the queue.' 
Example 6
Project: cams-tools   Author: kstopa   File: tools.py    GNU Lesser General Public License v3.0
def convert(grib2_path, path_out, time, format_out=Wgrib2Format.NETCDF):
        """

        :param grib2_path: Grib2 file to be converted.
        :param path_out: Output path where each available time (individual grid) will be saved.
        :param time: Reference time of the grib2 forecast. Can be obtained using the Time.from_file() method.
        :type time: Time
        :param format_out: One of the available Wgrib2Format output formats.
        :return: The list of paths of the converted files.
        """
        files = []
        print("Converting {0} to {1}".format(grib2_path, format_out.value))
        for hour in range(1, time.get_hours_range() + 1):
            hour_num = hour+time.get_base_time()
            out_filename = path.basename(grib2_path).replace(time.value, Converter._format_hour(hour_num)).replace('.grib2', '.{0}'.format(format_out.get_file_extension()))
            out_filepath = path.join(path_out, out_filename)
            cmd = ['wgrib2', grib2_path, '-d', str(hour), format_out.to_cmd(), out_filepath]
            # TODO Add error control. Check if wgrib2 worked
            call(cmd)
            files.append(out_filepath)
        return files 
Example 7
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: script.py    Apache License 2.0
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    actions = sorted(iteritems(actions))
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print() 
Example 8
Project: flasky   Author: RoseOu   File: script.py    MIT License
def print_usage(actions):
    """Print the usage information.  (Help screen)"""
    actions = sorted(iteritems(actions))
    print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
    print('       %s --help' % basename(sys.argv[0]))
    print()
    print('actions:')
    for name, (func, doc, arguments) in actions:
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            if isinstance(default, bool):
                print('    %s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg
                ))
            else:
                print('    %-30s%-10s%s' % (
                    (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
                    argtype, default
                ))
        print() 
Example 9
Project: flasky   Author: RoseOu   File: _compat.py    MIT License
def _check_if_pyc(fname):
    """Return True if the extension is .pyc, False if .py
    and None if otherwise"""
    from imp import find_module
    from os.path import realpath, dirname, basename, splitext

    # Normalize the file-path for the find_module()
    filepath = realpath(fname)
    dirpath = dirname(filepath)
    module_name = splitext(basename(filepath))[0]

    # Validate and fetch
    try:
        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
    except ImportError:
        raise IOError("Cannot find config file. "
                      "Path maybe incorrect! : {0}".format(filepath))
    return pytype, fileobj, fullpath 
Example 10
Project: MetrixReloaded   Author: Scrounger   File: MetrixReloadedSetup.py    GNU Lesser General Public License v3.0
def getCurrentColor(self):
        myfile = self.skin_base_dir + self.color_file
        if not path.exists(myfile):
            if path.exists(self.skin_base_dir + self.default_color_file):
                if path.islink(myfile):
                    remove(myfile)
                chdir(self.skin_base_dir)
                symlink(self.default_color_file, self.color_file)
            else:
                return None
        filename = path.realpath(myfile)
        filename = path.basename(filename)

        search_str = 'colors_'
        friendly_name = filename.replace(search_str, "")
        friendly_name = friendly_name.replace(".xml", "")
        friendly_name = friendly_name.replace("_", " ")
        return (filename, friendly_name) 
Example 11
Project: MetrixReloaded   Author: Scrounger   File: MetrixReloadedSetup.py    GNU Lesser General Public License v3.0
def getCurrentFont(self):
        myfile = self.skin_base_dir + self.font_file
        if not path.exists(myfile):
            if path.exists(self.skin_base_dir + self.default_font_file):
                if path.islink(myfile):
                    remove(myfile)
                chdir(self.skin_base_dir)
                symlink(self.default_font_file, self.font_file)
            else:
                return None
        filename = path.realpath(myfile)
        filename = path.basename(filename)

        search_str = 'font_'
        friendly_name = filename.replace(search_str, "")
        friendly_name = friendly_name.replace(".xml", "")
        friendly_name = friendly_name.replace("_", " ")
        return (filename, friendly_name) 
Example 12
Project: calmjs   Author: calmjs   File: test_toolchain.py    GNU General Public License v2.0
def test_toolchain_standard_build_dir_remapped(self):
        """
        This can either be caused by relative paths or symlinks.  Will
        result in the manually specified build_dir being remapped to its
        real location
        """

        fake = mkdtemp(self)
        real = mkdtemp(self)
        real_base = basename(real)
        spec = Spec()
        spec['build_dir'] = join(fake, pardir, real_base)

        with pretty_logging(stream=StringIO()) as s:
            with self.assertRaises(NotImplementedError):
                self.toolchain(spec)

        self.assertIn("realpath of 'build_dir' resolved to", s.getvalue())
        self.assertEqual(spec['build_dir'], real) 
Example 13
Project: calmjs   Author: calmjs   File: test_toolchain.py    GNU General Public License v2.0
def test_transpiler_sourcemap(self):
        # a kind of silly test but shows concept
        build_dir = mkdtemp(self)
        srcdir = mkdtemp(self)
        js_code = 'var dummy = function() {\n};\n'
        source = join(srcdir, 'source.js')
        target = 'target.js'

        with open(source, 'w') as fd:
            fd.write(js_code)

        spec = Spec(build_dir=build_dir, generate_source_map=True)
        modname = 'dummy'
        self.toolchain.transpile_modname_source_target(
            spec, modname, source, target)

        with open(join(build_dir, target + '.map')) as fd:
            result = json.load(fd)

        self.assertEqual(result['mappings'], 'AAAA;AACA;')
        self.assertEqual(len(result['sources']), 1)
        self.assertEqual(basename(result['sources'][0]), 'source.js')
        self.assertEqual(result['file'], target) 
Example 14
Project: calmjs   Author: calmjs   File: artifact.py    GNU General Public License v2.0
def generate_metadata_entry(self, entry_point, toolchain, spec):
        """
        After the toolchain and spec have been executed, this may be
        called to generate the artifact export entry for persistence
        into the metadata file.
        """

        export_target = spec['export_target']
        toolchain_bases = trace_toolchain(toolchain)
        toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH)
        toolchain_bin = ([
            basename(toolchain_bin_path),  # bin_name
            get_bin_version_str(toolchain_bin_path),  # bin_version
        ] if toolchain_bin_path else [])

        return {basename(export_target): {
            'toolchain_bases': toolchain_bases,
            'toolchain_bin': toolchain_bin,
            'builder': '%s:%s' % (
                entry_point.module_name, '.'.join(entry_point.attrs)),
        }} 
Example 15
Project: rowgenerators   Author: Metatab   File: util.py    MIT License
def real_files_in_zf(zf):
    """Return a list of internal paths of real files in a zip file, based on the 'external_attr' values"""
    from os.path import basename

    for e in zf.infolist():

        if basename(e.filename).startswith('__') or basename(e.filename).startswith('.'):
            continue

        # I really don't understand external_attr, but no one else seems to either,
        # so we're just hacking here.
        # e.external_attr>>31&1 works when the archive has external attrs set, and a dir hierarchy
        # e.external_attr==0 works in cases where there are no external attrs set
        # e.external_attr==32 is true for some single-file archives.
        if bool(e.external_attr >> 31 & 1 or e.external_attr == 0 or e.external_attr == 32):
            yield e.filename 
Example 16
Project: rowgenerators   Author: Metatab   File: zip.py    MIT License
def real_files_in_zf(zf):
        """Return a list of internal paths of real files in a zip file, based on the 'external_attr' values"""
        from os.path import basename

        for e in zf.infolist():

            # Get rid of __MACOS and .DS_whatever
            if basename(e.filename).startswith('__') or basename(e.filename).startswith('.'):
                continue

            # I really don't understand external_attr, but no one else seems to either,
            # so we're just hacking here.
            # e.external_attr>>31&1 works when the archive has external attrs set, and a dir hierarchy
            # e.external_attr==0 works in cases where there are no external attrs set
            # e.external_attr==32 is true for some single-file archives.
            if bool(e.external_attr >> 31 & 1 or e.external_attr == 0 or e.external_attr == 32):
                yield e.filename 
Example 17
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def importee_dir(self, importer_dir, suite_import, check_alternate=True):
        suitename = suite_import.name
        if suitename in self.suitenamemap:
            suitename = self.suitenamemap[suitename]

        # Try to use the URL first so that a big repo is cloned to a local
        # directory whose name is based on the repo instead of a suite
        # nested in the big repo.
        base = None
        for urlinfo in suite_import.urlinfos:
            if urlinfo.abs_kind() == 'source':
                # 'https://github.com/graalvm/graal.git' -> 'graal'
                base, _ = os.path.splitext(basename(_urllib_parse.urlparse(urlinfo.url).path))
                if base: break
        if base:
            path = join(SiblingSuiteModel.siblings_dir(importer_dir), base)
        else:
            path = join(SiblingSuiteModel.siblings_dir(importer_dir), suitename)
        checked = self._check_exists(suite_import, path, check_alternate)
        return SuiteModel._checked_to_importee_tuple(checked, suite_import) 
Example 18
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def create_mx_binary_distribution_jar(self):
        """
        Creates a jar file named name-mx.jar that contains
        the metadata for another suite to import this suite as a BinarySuite.
        TODO check timestamps to avoid recreating this repeatedly, or would
        the check dominate anyway?
        TODO It would be cleaner for subsequent loading if we actually wrote a
        transformed suite.py file that only contained distribution info, to
        detect access to private (non-distribution) state
        """
        mxMetaJar = self.mx_binary_distribution_jar_path()
        mxfiles = glob.glob(join(self.mxDir, '*.py'))
        mxfiles += glob.glob(join(self.mxDir, '*.properties'))
        with Archiver(mxMetaJar) as arc:
            for mxfile in mxfiles:
                mxDirBase = basename(self.mxDir)
                arc.zf.write(mxfile, arcname=join(mxDirBase, basename(mxfile))) 
Example 19
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def getDistribution(self, vcdir, distribution):
        suiteName = basename(vcdir)
        reason = distribution.needsUpdate(TimeStampFile(join(vcdir, _mx_binary_distribution_version(suiteName)), followSymlinks=False))
        if not reason:
            return
        log('Updating {} [{}]'.format(distribution, reason))
        metadata = self._readMetadata(vcdir)
        artifactId = distribution.maven_artifact_id()
        groupId = distribution.maven_group_id()
        path = distribution.path[:-len(distribution.localExtension())] + distribution.remoteExtension()
        if distribution.isJARDistribution():
            sourcesPath = distribution.sourcesPath
        else:
            sourcesPath = None
        with SafeFileCreation(path, companion_patterns=["{path}.sha1"]) as sfc, SafeFileCreation(sourcesPath, companion_patterns=["{path}.sha1"]) as sourceSfc:
            self._pull_artifact(metadata, groupId, artifactId, distribution.remoteName(), sfc.tmpPath, sourcePath=sourceSfc.tmpPath, extension=distribution.remoteExtension())
            final_path = distribution.postPull(sfc.tmpPath)
        if final_path:
            os.rename(final_path, distribution.path)
        assert exists(distribution.path)
        distribution.notify_updated() 
Example 20
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def hasJarOnClasspath(self, jar):
        """
        Determines if `jar` is available on the boot class path or in the
        extension/endorsed directories of this JDK.

        :param str jar: jar file name (without directory component)
        :return: the absolute path to the jar file in this JDK matching `jar` or None
        """
        self._init_classpaths()

        if self._bootclasspath:
            for e in self._bootclasspath.split(os.pathsep):
                if basename(e) == jar:
                    return e
        if self._extdirs:
            for d in self._extdirs.split(os.pathsep):
                if len(d) and jar in os.listdir(d):
                    return join(d, jar)
        if self._endorseddirs:
            for d in self._endorseddirs.split(os.pathsep):
                if len(d) and jar in os.listdir(d):
                    return join(d, jar)
        return None 
Example 21
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def get_automatic_module_name(self, modulejar):
        """
        Derives the name of an automatic module from an automatic module jar according to
        specification of ``java.lang.module.ModuleFinder.of(Path... entries)``.

        :param str modulejar: the path to a jar file treated as an automatic module
        :return: the name of the automatic module derived from `modulejar`
        """

        if self.javaCompliance < '9':
            abort('Cannot call get_automatic_module_name() for pre-9 JDK ' + str(self))

        # Drop directory prefix and .jar (or .zip) suffix
        name = os.path.basename(modulejar)[0:-4]

        # Find first occurrence of -${NUMBER}. or -${NUMBER}$
        m = re.search(r'-(\d+(\.|$))', name)
        if m:
            name = name[0:m.start()]

        # Finally clean up the module name (see jdk.internal.module.ModulePath.cleanModuleName())
        name = re.sub(r'[^A-Za-z0-9]', '.', name) # replace non-alphanumeric
        name = re.sub(r'(\.)(\1)+', '.', name) # collapse repeating dots
        name = re.sub(r'^\.', '', name) # drop leading dots
        return re.sub(r'\.$', '', name) # drop trailing dots 
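
To make the derivation above concrete, here is a hand-traced run of the same steps on a hypothetical jar name (only the path is invented; the regexes are the ones in the snippet):

import os.path
import re

modulejar = '/deploy/jackson-databind-2.9.8.jar'  # hypothetical path
name = os.path.basename(modulejar)[0:-4]          # 'jackson-databind-2.9.8'
m = re.search(r'-(\d+(\.|$))', name)              # matches the version suffix '-2.'
if m:
    name = name[0:m.start()]                      # 'jackson-databind'
name = re.sub(r'[^A-Za-z0-9]', '.', name)         # 'jackson.databind'
print(name)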
Example 22
Project: PEAKachu   Author: tbischler   File: library.py    ISC License
def __init__(self, paired_end, max_insert_size, bam_file, replicon_dict):
        self.paired_end = paired_end
        self.bam_file = bam_file
        self.max_insert_size = max_insert_size
        self.lib_name = splitext(basename(bam_file))[0]
        self.replicon_dict = replicon_dict 
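
The splitext(basename(...))[0] idiom used here (and in Examples 2 and 3) is the standard way to reduce a path to its bare stem; a one-line sketch with a made-up path:

from os.path import basename, splitext

print(splitext(basename('/data/libs/control_rep1.bam'))[0])  # 'control_rep1'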
Example 23
Project: pyblish-win   Author: pyblish   File: ScriptBinding.py    GNU Lesser General Public License v3.0
def run_module_event(self, event):
        """Run the module after setting up the environment.

        First check the syntax.  If OK, make sure the shell is active and
        then transfer the arguments, set the run environment's working
        directory to the directory of the module being executed and also
        add that directory to its sys.path if not already included.

        """
        filename = self.getfilename()
        if not filename:
            return 'break'
        code = self.checksyntax(filename)
        if not code:
            return 'break'
        if not self.tabnanny(filename):
            return 'break'
        interp = self.shell.interp
        if PyShell.use_subprocess:
            interp.restart_subprocess(with_cwd=False)
        dirname = os.path.dirname(filename)
        # XXX Too often this discards arguments the user just set...
        interp.runcommand("""if 1:
            __file__ = {filename!r}
            import sys as _sys
            from os.path import basename as _basename
            if (not _sys.argv or
                _basename(_sys.argv[0]) != _basename(__file__)):
                _sys.argv = [__file__]
            import os as _os
            _os.chdir({dirname!r})
            del _sys, _basename, _os
            \n""".format(filename=filename, dirname=dirname))
        interp.prepend_syspath(filename)
        # XXX KBK 03Jul04 When run w/o subprocess, runtime warnings still
        #         go to __stderr__.  With subprocess, they go to the shell.
        #         Need to change streams in PyShell.ModifiedInterpreter.
        interp.runcode(code)
        return 'break' 
Example 24
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: __init__.py    Apache License 2.0
def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404) 
Example 25
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: loader.py    MIT License
def model_name(file_path):
    file_name = basename(file_path)
    ext = str()
    if '.' in file_name: # exclude extension
        file_name = file_name.split('.')
        ext = file_name[-1]
        file_name = '.'.join(file_name[:-1])
    if ext == str() or ext == 'meta': # ckpt file
        file_name = file_name.split('-')
        num = int(file_name[-1])
        return '-'.join(file_name[:-1])
    if ext == 'weights':
        return file_name 
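
A hand-traced usage of model_name() on two hypothetical paths (TensorFlow-style checkpoints typically end in '-<step>.meta', so the trailing step number is stripped as well):

model_name('backup/yolo-voc-4000.meta')  # -> 'yolo-voc'  (extension and step number stripped)
model_name('bin/yolo-voc.weights')       # -> 'yolo-voc'  (extension stripped)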
Example 26
Project: Traffic_sign_detection_YOLO   Author: AmeyaWagh   File: framework.py    MIT License
def __init__(self, meta, FLAGS):
        model = basename(meta['model'])
        model = '.'.join(model.split('.')[:-1])
        meta['name'] = model
        
        self.constructor(meta, FLAGS) 
Example 27
Project: DataHack2018   Author: InnovizTech   File: iou_evaluator.py    BSD 3-Clause "New" or "Revised" License
def evaluate_folder(gt_folder, pred_folder, validmode=False):
    # get all ground truth files
    gt_indices = [data_utils.filename_to_frame(file)
                  for file in data_utils.find_sorted_files(gt_folder, 'labels')]
    print('evaluating folder {} with {} frames...'.format(osp.basename(gt_folder), len(gt_indices)))
    agg_tp = 0
    agg_fn = 0
    agg_fp = 0
    for frame_idx in gt_indices:
        pred_file_path = data_utils.frame_to_filename(pred_folder, frame_idx, 'labels')
        gt_labels = data_utils.read_data(gt_folder, frame_idx, 'labels')
        if not osp.isfile(pred_file_path):
            if validmode:
                continue
            else:
                print("No matching prediction file for frame {} in {}, filling zero labels".format(frame_idx, gt_folder))
                pred_labels = np.zeros_like(gt_labels)
        else:
            pred_labels = data_utils.read_data(pred_folder, frame_idx, 'labels')

        if len(gt_labels) != len(pred_labels):
            raise ValueError('GT point count ({}) does not match prediction point count ({}) in file: {}'
                             .format(len(gt_labels), len(pred_labels), frame_idx))

        tp, fn, fp = evaluate_frame(gt_labels, pred_labels)
        agg_tp += tp
        agg_fn += fn
        agg_fp += fp
    return agg_tp, agg_fn, agg_fp 
Example 28
Project: DataHack2018   Author: InnovizTech   File: data_utils.py    BSD 3-Clause "New" or "Revised" License
def filename_to_frame(file_name):
    return int(osp.basename(file_name).split('_')[0]) 
Example 29
Project: rpm2swidtag   Author: swidtags   File: swidtags.py    Apache License 2.0
def create_swidtags_d_symlink(self, basename=None):
		if basename:
			target = path.join(SWIDTAG_DIR_DOWNLOAD, basename)
		else:
			basename = self.generated_dirname
			target = path.join(SWIDTAG_DIR_GEN, basename)
		if not path.isdir(self.swidtags_d):
			makedirs(self.swidtags_d)
		src = path.join(self.swidtags_d, basename)
		if not path.islink(src):
			symlink(path.join("../../..", target), src) 
Example 30
Project: rpm2swidtag   Author: swidtags   File: repodata.py    Apache License 2.0
def save(self):
		outpath = NamedTemporaryFile(dir=path.dirname(self.path), prefix="." + path.basename(self.path), delete=False)
		self.xml.write(outpath.file, xml_declaration=True, encoding="utf-8", pretty_print=True)
		orig_umask = umask(0)
		umask(orig_umask)
		chmod(outpath.name, 0o666 & ~orig_umask)
		rename(outpath.name, self.path) 
Example 31
Project: s3-uploader   Author: wizart-tech   File: uploader.py    MIT License
def upload_file(input_arguments):
    with open(input_arguments.path) as file:
        store_at = input_arguments.s3_path.strip('/')

        prefix = input_arguments.s3_root
        if prefix:
            store_at = "{}/{}".format(prefix, store_at)

        store_at = "{}/{}".format(store_at, path.basename(input_arguments.path))

        client.put_object(
            Bucket=input_arguments.s3_bucket,
            Key=store_at,
            ACL=input_arguments.s3_visibility,
            Body=file.read(),
        ) 
Example 32
Project: VSE-C   Author: ExplorerFreda   File: cli.py    MIT License
def escape_desc_name(filename):
    basename = osp.basename(filename)
    if basename.endswith('.py'):
        basename = basename[:-3]
    name = basename.replace('.', '_')
    return name 
Example 33
Project: google_streetview   Author: rrwen   File: api.py    MIT License
def download_links(self, dir_path, metadata_file='metadata.json', metadata_status='status', status_ok='OK'):
    """Download Google Street View images from parameter queries if they are available.
    
    Args:
      dir_path (str):
        Path of directory to save downloads of images from :class:`api.results`.links
      metadata_file (str):
         Name of the file with extension to save the :class:`api.results`.metadata
      metadata_status (str):
        Key name of the status value from :class:`api.results`.metadata response from the metadata API request.
      status_ok (str):
        Value from the metadata API response status indicating that an image is available.
    """
    metadata = self.metadata
    if not path.isdir(dir_path):
      makedirs(dir_path)
    
    # (download) Download images if status from metadata is ok
    for i, url in enumerate(self.links):
      if metadata[i][metadata_status] == status_ok:
        file_path = path.join(dir_path, 'gsv_' + str(i) + '.jpg')
        metadata[i]['_file'] = path.basename(file_path) # add file reference
        helpers.download(url, file_path)
    
    # (metadata) Save metadata with file reference
    metadata_path = path.join(dir_path, metadata_file)
    with open(metadata_path, 'w') as out_file:
      json.dump(metadata, out_file) 
Example 34
Project: flasky   Author: RoseOu   File: __init__.py    MIT License
def get_lexer_for_filename(_fn, code=None, **options):
    """
    Get a lexer for a filename.  If multiple lexers match the filename
    pattern, use ``analyze_text()`` to figure out which one is more
    appropriate.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                matches.append((cls, filename))

    if sys.version_info > (3,) and isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = code.decode('latin1')

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class.  The default implementation returns None which
        # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus
        return cls.priority + bonus

    if matches:
        matches.sort(key=get_rating)
        #print "Possible lexers, after sort:", matches
        return matches[-1][0](**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn) 
Example 35
Project: flasky   Author: RoseOu   File: __init__.py    MIT License
def guess_lexer_for_filename(_fn, _text, **options):
    """
    Lookup all lexers that handle those filenames primary (``filenames``)
    or secondary (``alias_filenames``). Then run a text analysis for those
    lexers and choose the best result.

    usage::

        >>> from pygments.lexers import guess_lexer_for_filename
        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
    """
    fn = basename(_fn)
    primary = None
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
                primary = lexer
        for filename in lexer.alias_filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        result.append((rv, lexer))
    result.sort()
    if not result[-1][0] and primary is not None:
        return primary(**options)
    return result[-1][1](**options) 
Example 36
Project: flasky   Author: RoseOu   File: __init__.py    MIT License
def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404) 
Example 37
Project: Servo   Author: fpsw   File: product.py    BSD 2-Clause "Simplified" License
def update_photo(self):
        """
        Updates this product image with the GSX part image
        """
        if self.component_code and not self.photo:
            try:
                part = parts.Part(partNumber=self.code)
                result = part.fetch_image()
                filename = basename(result)
                self.photo.save(filename, File(open(result)))
            except Exception as e:
                print(e)
Example 38
Project: snapchat-email   Author: jag426   File: snapchat-email.py    GNU General Public License v3.0
def on_snap(self, snapsender, snap):
        # Construct email message with image attached
        msg = MIMEMultipart()
        msg['Subject'] = 'Snap from ' + snapsender
        msg['From'] = 'SnapchatEmail Bot'
        msg['To'] = COMMASPACE.join([self.recipient])
        msg['Date'] = formatdate(localtime=True)

        msg.attach(MIMEText('attached'))

        with open(snap.file.name, 'rb') as fp:
            attachment = MIMEBase('application', 'octet-stream')
            attachment.set_payload(fp.read())
            encoders.encode_base64(attachment)
            attachment.add_header(
                    'Content-Disposition',
                    'attachment; filename="%s"' % basename(snap.file.name))
            msg.attach(attachment)

        # Connect to SMTP server and send message
        s = smtplib.SMTP(self.smtp)
        s.ehlo()
        s.starttls()
        s.login(self.emailuser, self.emailpass)
        s.sendmail(msg['From'], msg['To'], msg.as_string())
        s.quit()

        print("Emailed snap from " + snapsender + ".") 
Example 39
Project: nlimb   Author: cbschaff   File: logger.py    MIT License
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*
    """
    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx,tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            data[step-1, colidx] = value
    return pandas.DataFrame(data, columns=tags) 
Example 40
Project: MetrixReloaded   Author: Scrounger   File: MetrixReloadedCover.py    GNU Lesser General Public License v3.0
def findCover(self, path):
		fpath = p1 = p2 = p3 = ""
		name, ext = os_path.splitext(path)
		ext = ext.lower()

		if os_path.isfile(path):
			dir = os_path.dirname(path)
			p1 = name
			p2 = os_path.join(dir, os_path.basename(dir))

		elif os_path.isdir(path):
			if path.lower().endswith("/bdmv"):
				dir = path[:-5]
				if dir.lower().endswith("/brd"): dir = dir[:-4]
			elif path.lower().endswith("video_ts"):
				dir = path[:-9]
				if dir.lower().endswith("/dvd"): dir = dir[:-4]
			else:
				dir = path
				p2 = os_path.join(dir, "folder")

			prtdir, dirname = os_path.split(dir)
			p1 = os_path.join(dir, dirname)
			p3 = os_path.join(prtdir, dirname)

		pathes = (p1, p2, p3)
		for p in pathes:
			for ext in self.exts:
				path = p + ext
				if os_path.exists(path): break
			if os_path.exists(path):
				fpath = path
				break
		return (p1, fpath) 
Example 41
Project: specio   Author: paris-saclay-cds   File: mzxml.py    BSD 3-Clause "New" or "Revised" License
def _read_mzxml(self, mzxml_filename):
            from pyopenms import MzXMLFile, MSExperiment
            file_handler = MzXMLFile()
            experiment = MSExperiment()
            file_handler.load(mzxml_filename, experiment)
            spectra = []
            for sp in experiment:
                sp_converted = self._build_spectrum(sp)
                if sp_converted is not None:
                    sp_converted.meta.update(
                        {'filename': basename(mzxml_filename)})
                    spectra.append(sp_converted)
            return spectra 
Example 42
Project: specio   Author: paris-saclay-cds   File: spc.py    BSD 3-Clause "New" or "Revised" License
def _spc_to_numpy(self, spc_file, spc_filename):
            """Convert the SPC File data to spectrum data.

            Parameters
            ----------
            spc_file : spc.File
                The SPC File to be converted.

            spc_filename : string
                The SPC filename to be added to the dictionary.

            Returns
            -------
            spectrum : util.Spectrum
                The converted data.

            """
            meta = self._meta_data_from_spc(spc_file)
            meta['filename'] = basename(spc_filename)
            if spc_file.dat_fmt in ('gx-y', 'x-y'):
                spectrum = np.squeeze([f.y for f in spc_file.sub])
                wavelength = spc_file.x
                return Spectrum(spectrum, wavelength, meta)

            elif spc_file.dat_fmt == '-xy':
                return [Spectrum(f.y, f.x, meta) for f in spc_file.sub]

            else:
                raise ValueError('Unknown file structure.') 
Example 43
Project: specio   Author: paris-saclay-cds   File: test_mzml.py    BSD 3-Clause "New" or "Revised" License
def test_specread_mzml():
    filename = load_mzml_path()
    spec = specread(filename)
    assert isinstance(spec, list)
    assert all([isinstance(sp, Spectrum) for sp in spec])
    sp = spec[0]
    assert sp.amplitudes.shape == (282,)
    assert sp.wavelength.shape == (282,)
    assert sp.meta['filename'] == basename(filename) 
Example 44
Project: specio   Author: paris-saclay-cds   File: test_mzml.py    BSD 3-Clause "New" or "Revised" License
def test_get_reader():
    filename = load_mzml_path()
    R = Request(filename)
    F = formats["MZML"]
    assert F.can_read(R)
    reader = F.get_reader(R)
    assert reader.get_length() == 531
    spec = reader.get_data(0)
    assert spec.amplitudes.shape == (282,)
    assert spec.wavelength.shape == (282,)
    assert spec.meta['filename'] == basename(filename)
    assert spec.amplitudes[0] == pytest.approx(37.384331) 
Example 45
Project: specio   Author: paris-saclay-cds   File: test_common.py    BSD 3-Clause "New" or "Revised" License
def _check_get_spectra(spec, filename, filename_cmp, spectrum_shape,
                       wavelength_shape):
    assert spec.amplitudes.shape == spectrum_shape
    assert spec.wavelength.shape == wavelength_shape
    if isinstance(spec.meta, tuple):
        for m in spec.meta:
            if filename_cmp:
                assert m['filename'] == basename(filename)
            else:
                assert m['filename']
    else:
        if filename_cmp:
            assert spec.meta['filename'] == basename(filename)
        else:
            assert spec.meta['filename'] 
Example 46
Project: specio   Author: paris-saclay-cds   File: mzml.py    BSD 3-Clause "New" or "Revised" License
def _read_mzml(self, mzml_filename):
            from pyopenms import MzMLFile, MSExperiment
            file_handler = MzMLFile()
            experiment = MSExperiment()
            file_handler.load(mzml_filename, experiment)
            spectra = []
            for sp in experiment:
                sp_converted = self._build_spectrum(sp)
                if sp_converted is not None:
                    sp_converted.meta.update(
                        {'filename': basename(mzml_filename)})
                    spectra.append(sp_converted)
            return spectra 
Example 47
Project: lirpg   Author: Hwhitetooth   File: logger.py    MIT License
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
           of the form events.*
    """
    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx,tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            data[step-1, colidx] = value
    return pandas.DataFrame(data, columns=tags) 
Example 48
Project: calmjs   Author: calmjs   File: ui.py    GNU General Public License v2.0
def prompt_overwrite_json(original, new, target_path, dumps=json_dumps):
    """
    Prompt end user with a diff of original and new json that may
    overwrite the file at the target_path.  This function only displays
    a confirmation prompt and it is up to the caller to implement the
    actual functionality.  Optionally, a custom json.dumps method can
    also be passed in for output generation.
    """

    # generate compacted ndiff output.
    diff = '\n'.join(l for l in (
        line.rstrip() for line in difflib.ndiff(
            dumps(original).splitlines(),
            dumps(new).splitlines(),
        ))
        if l[:1] in '?+-' or l[-1:] in '{}' or l[-2:] == '},')
    basename_target = basename(target_path)
    return prompt(
        "Generated '%(basename_target)s' differs with '%(target_path)s'.\n\n"
        "The following is a compacted list of changes required:\n"
        "%(diff)s\n\n"
        "Overwrite '%(target_path)s'?" % locals(),
        choices=(
            ('Yes', True),
            ('No', False),
        ),
        default_key=1,
    ) 
Example 49
Project: calmjs   Author: calmjs   File: toolchain.py    GNU General Public License v2.0
def simple_transpile_modname_source_target(
            self, spec, modname, source, target):
        """
        The original simple transpile method called by compile_transpile
        on each target.
        """

        opener = self.opener
        bd_target = self._generate_transpile_target(spec, target)
        logger.info('Transpiling %s to %s', source, bd_target)
        with opener(source, 'r') as reader, opener(bd_target, 'w') as _writer:
            writer = SourceWriter(_writer)
            self.transpiler(spec, reader, writer)
            if writer.mappings and spec.get(GENERATE_SOURCE_MAP):
                source_map_path = bd_target + '.map'
                with open(source_map_path, 'w') as sm_fd:
                    self.dump(encode_sourcemap(
                        filename=bd_target,
                        mappings=writer.mappings,
                        sources=[source],
                    ), sm_fd)

                # just use basename
                source_map_url = basename(source_map_path)
                _writer.write('\n//# sourceMappingURL=')
                _writer.write(source_map_url)
                _writer.write('\n') 
Example 50
Project: rowgenerators   Author: Metatab   File: web.py    MIT License
def basename(self):
        from os.path import basename
        return basename(self.path) 
Example 51
Project: rowgenerators   Author: Metatab   File: s3.py    MIT License
def resource_file(self):

        from rowgenerators.appurl import parse_app_url
        from rowgenerators.appurl.util import file_ext
        from os.path import basename, join, dirname

        return basename(self.resource_url) 
Example 52
Project: rowgenerators   Author: Metatab   File: s3.py    MIT License
def resource_format(self):

        from rowgenerators.appurl import parse_app_url
        from rowgenerators.appurl.util import file_ext
        from os.path import basename, join, dirname

        if self._resource_format:
            return self._resource_format
        else:
            return file_ext(self.resource_file) 
Example 53
Project: rowgenerators   Author: Metatab   File: socrata.py    MIT License
def resource_file(self):
        return basename(self.path)+'.csv' 
Example 54
Project: rowgenerators   Author: Metatab   File: download.py    MIT License
def cache_path(self, url):
        import hashlib
        from urllib.parse import urlparse
        from os import sep
        from os.path import join, dirname, basename

        url = url.replace('\\', '/')

        # .decode('utf8'): the fs module gets upset when given plain strings,
        # so we need to decode to unicode. UTF8 is a WAG.
        try:
            parsed = urlparse(url.decode('utf8'))
        except AttributeError:
            parsed = urlparse(url)

        # Create a name for the file in the cache, based on the URL.
        # The '\' replacement is because pyfs only wants to use UNIX path separators, but
        # python os.path.join will use the one specified for the operating system.
        cache_path = join(parsed.netloc, parsed.path.strip('/'))

        # If there is a query, hash it and add it to the path
        if parsed.query:
            hash = hashlib.sha224(parsed.query.encode('utf8')).hexdigest()
            # We put the hash before the last path element, because that's the target file, which gets
            # used to figure out what the target format should be.
            cache_path = join(dirname(cache_path), hash, basename(cache_path))

        cache_path = cache_path.replace(sep, '/')

        return cache_path 
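
For orientation, a self-contained trace of the same path-building logic on a made-up URL (the URL and names are invented for illustration):

from hashlib import sha224
from urllib.parse import urlparse
from os.path import join, dirname, basename

url = 'http://example.com/data/file.csv?year=2020'
parsed = urlparse(url)
cache_path = join(parsed.netloc, parsed.path.strip('/'))  # 'example.com/data/file.csv'
if parsed.query:
    # the query hash lands just before the file name
    hash = sha224(parsed.query.encode('utf8')).hexdigest()
    cache_path = join(dirname(cache_path), hash, basename(cache_path))
print(cache_path)  # example.com/data/<sha224-of-query>/file.csv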
Example 55
Project: rowgenerators   Author: Metatab   File: file.py    MIT License
def basename(self):
        raise NotImplementedError() 
Example 56
Project: rowgenerators   Author: Metatab   File: file.py    MIT License
def basename(self):
        return self.inner.basename() 
Example 57
Project: rowgenerators   Author: Metatab   File: file.py    MIT License
def basename(self):
        return basename(self.fspath) 
Example 58
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def importee_dir(self, importer_dir, suite_import, check_alternate=True):
        suitename = suite_import.name
        if suitename in self.suitenamemap:
            suitename = self.suitenamemap[suitename]
        if basename(importer_dir) == basename(self._primaryDir):
            # primary is importer
            this_imported_suites_dirname = join(importer_dir, NestedImportsSuiteModel._imported_suites_dirname())
            ensure_dir_exists(this_imported_suites_dirname)
            path = join(this_imported_suites_dirname, suitename)
        else:
            path = join(SuiteModel.siblings_dir(importer_dir), suitename)
        checked = self._check_exists(suite_import, path, check_alternate)
        return SuiteModel._checked_to_importee_tuple(checked, suite_import) 
Example 59
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def get_mx_output_dir(self):
        """
        Gets the directory into which mx bookkeeping artifacts should be placed.
        """
        return join(self.get_output_root(), basename(self.mxDir)) 
Example 60
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def _suitename(mxDir):
    parts = basename(mxDir).split('.')
    if len(parts) == 3:
        assert parts[0] == ''
        assert parts[1] == 'mx'
        return parts[2]
    assert len(parts) == 2, parts
    assert parts[0] == 'mx'
    return parts[1] 
Example 61
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def getArchivableResults(self, use_relpath=True, single=False):
        if single:
            raise ValueError("single not supported")
        outputDir = self.output_dir()
        archivePrefix = self.archive_prefix()
        for f in self.getResults():
            if use_relpath:
                filename = self.get_relpath(f, outputDir)
            else:
                filename = basename(f)
            arcname = join(archivePrefix, filename)
            yield f, arcname 
Example 62
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def source_gen_dir_name(self):
        """
        Get the directory name in which source files generated by the annotation processor are found/placed.
        """
        return basename(self.source_gen_dir()) 
Example 63
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def getArchivableResults(self, use_relpath=True, single=False):
        if single:
            raise ValueError("single not supported")
        output = self.getOutput()
        output = join(self.suite.dir, output) if output else None
        for r in self.getResults():
            if output and use_relpath:
                filename = os.path.relpath(r, output)
            else:
                filename = basename(r)
            # Make debug-info files optional for distribution
            if is_debug_lib_file(r) and not os.path.exists(r):
                warn("File {} for archive {} does not exist.".format(filename, self.name))
            else:
                yield r, filename
        if hasattr(self, "headers"):
            srcdir = os.path.join(self.suite.dir, self.dir)
            for h in self.headers:
                if use_relpath:
                    filename = h
                else:
                    filename = basename(h)
                yield os.path.join(srcdir, h), filename
Example 64
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0
def get_path(self, resolve):
        extract_path = _make_absolute(self.extract_path, self.suite.dir)
        download_path = super(PackedResourceLibrary, self).get_path(resolve)
        if resolve and self._check_extract_needed(extract_path, download_path):
            extract_path_tmp = tempfile.mkdtemp(suffix=basename(extract_path), dir=dirname(extract_path))
            try:
                # extract archive
                Extractor.create(download_path).extract(extract_path_tmp)
                # ensure modification time is up to date
                os.utime(extract_path_tmp, None)
                logv("Moving temporary directory {} to {}".format(extract_path_tmp, extract_path))
                try:
                    # attempt atomic overwrite
                    os.rename(extract_path_tmp, extract_path)
                except OSError:
                    # clean destination & re-try for cases where atomic overwrite doesn't work
                    rmtree(extract_path, ignore_errors=True)
                    os.rename(extract_path_tmp, extract_path)
            except OSError as ose:
                # Rename failed. Race with another process?
                if self._check_extract_needed(extract_path, download_path):
                    # ok something really went wrong
                    abort("Extracting {} failed!".format(download_path), context=ose)
            finally:
                rmtree(extract_path_tmp, ignore_errors=True)

        return extract_path 
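The extract-into-a-sibling-temp-directory pattern is what makes the final move cheap and usually atomic: tempfile.mkdtemp(dir=dirname(extract_path)) keeps the staging area on the same filesystem, and os.rename fails rather than merging when the destination already exists, hence the rmtree-and-retry. A stripped-down sketch of the same idea using only the stdlib (Extractor and the logging helpers are mx-specific; paths and the archive format here are hypothetical):

import os
import shutil
import tarfile
import tempfile
from os.path import basename, dirname

def extract_atomically(archive_path, extract_path):
    # stage next to the destination so the rename stays on one filesystem
    tmp = tempfile.mkdtemp(suffix=basename(extract_path), dir=dirname(extract_path))
    try:
        with tarfile.open(archive_path) as tf:
            tf.extractall(tmp)
        os.utime(tmp, None)                # mark the extracted tree as fresh
        try:
            os.rename(tmp, extract_path)   # atomic if the target is absent
        except OSError:
            shutil.rmtree(extract_path, ignore_errors=True)   # clear stale copy
            os.rename(tmp, extract_path)
    finally:
        shutil.rmtree(tmp, ignore_errors=True)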
Example 67
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 5 votes vote down vote up
def __enter__(self):
        if self.path is not None:
            path_dir = dirname(self.path)
            ensure_dir_exists(path_dir)
            # Temporary file must be on the same file system as self.path for os.rename to be atomic.
            fd, tmp = tempfile.mkstemp(suffix=basename(self.path), dir=path_dir)
            self.tmpFd = fd
            self.tmpPath = tmp
        else:
            self.tmpFd = None
            self.tmpPath = None
        return self 
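Here basename(self.path) only serves as a suffix so the temp file is recognizable next to its target; the dir=path_dir argument is what guarantees the later rename is atomic. A self-contained sketch of the complete write-then-rename idiom this __enter__ is building toward (function name and error handling are illustrative, not mx's; the sketch uses os.replace, the portable Python 3 spelling of an atomic overwriting rename):

import os
import tempfile
from os.path import basename, dirname

def write_atomically(path, data):
    path_dir = dirname(path) or '.'
    # same directory => same filesystem => os.replace is atomic
    fd, tmp = tempfile.mkstemp(suffix=basename(path), dir=path_dir)
    try:
        with os.fdopen(fd, 'wb') as f:
            f.write(data)
        os.replace(tmp, path)   # atomic overwrite (Python 3.3+)
    except BaseException:
        os.unlink(tmp)
        raise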
Example 68
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 5 votes vote down vote up
def _derived_path(base_path, suffix, prefix='.', prepend_dirname=True):
    """
    Gets a path derived from `base_path` by prepending `prefix` and appending `suffix`
    to the base name of `base_path`.

    :param bool prepend_dirname: if True, `dirname(base_path)` is prepended to the derived base name
    """
    derived = prefix + basename(base_path) + suffix
    if prepend_dirname:
        derived = join(dirname(base_path), derived)
    return derived 
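With the default arguments this produces a hidden sibling of the original file. For example (path hypothetical):

from os.path import basename, dirname, join

# derived = prefix + basename + suffix, re-rooted in the original directory
base_path = '/tmp/dists/app.jar'
print(join(dirname(base_path), '.' + basename(base_path) + '.sha1'))
# /tmp/dists/.app.jar.sha1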
Example 69
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 5 votes vote down vote up
def intellijinit(args, refreshOnly=False, doFsckProjects=True, mx_python_modules=True, java_modules=True,
                 generate_external_projects=True, native_projects=False):
    # In a multiple suite context, the .idea directory in each suite
    # has to be complete and contain information that is repeated
    # in dependent suites.
    declared_modules = set()
    referenced_modules = set()
    sdks = intellij_read_sdks()
    for suite in suites(True) + ([_mx_suite] if mx_python_modules else []):
        _intellij_suite(args, suite, declared_modules, referenced_modules, sdks, refreshOnly, mx_python_modules,
                        generate_external_projects, java_modules and not suite.isBinarySuite(), suite != primary_suite(),
                        generate_native_projects=native_projects)

    if len(referenced_modules - declared_modules) != 0:
        abort('Some referenced modules are missing from modules.xml: {}'.format(referenced_modules - declared_modules))

    if mx_python_modules:
        # mx module
        moduleXml = XMLDoc()
        moduleXml.open('module', attributes={'type': 'PYTHON_MODULE', 'version': '4'})
        moduleXml.open('component', attributes={'name': 'NewModuleRootManager', 'inherit-compiler-output': 'true'})
        moduleXml.element('exclude-output')
        moduleXml.open('content', attributes={'url': 'file://$MODULE_DIR$'})
        moduleXml.element('sourceFolder', attributes={'url': 'file://$MODULE_DIR$', 'isTestSource': 'false'})
        for d in set((p.subDir for p in _mx_suite.projects if p.subDir)):
            moduleXml.element('excludeFolder', attributes={'url': 'file://$MODULE_DIR$/' + d})
        if dirname(_mx_suite.get_output_root()) == _mx_suite.dir:
            moduleXml.element('excludeFolder', attributes={'url': 'file://$MODULE_DIR$/' + basename(_mx_suite.get_output_root())})
        moduleXml.close('content')
        moduleXml.element('orderEntry', attributes={'type': 'jdk', 'jdkType': intellij_python_sdk_type, 'jdkName': intellij_get_python_sdk_name(sdks)})
        moduleXml.element('orderEntry', attributes={'type': 'sourceFolder', 'forTests': 'false'})
        moduleXml.close('component')
        moduleXml.close('module')
        mxModuleFile = join(_mx_suite.dir, basename(_mx_suite.dir) + '.iml')
        update_file(mxModuleFile, moduleXml.xml(indent='  ', newl='\n'))

    if doFsckProjects and not refreshOnly:
        fsckprojects([]) 
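The basename() calls in the mx-module branch derive both the excluded output folder and the .iml file name from the suite directory itself. In isolation (directory hypothetical):

from os.path import basename, join

suite_dir = '/workspace/mx'   # hypothetical suite directory
print(join(suite_dir, basename(suite_dir) + '.iml'))
# /workspace/mx/mx.iml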
Example 70
Project: pyblish-win   Author: pyblish   File: file_util.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def move_file (src, dst, verbose=1, dry_run=0):
    """Move a file 'src' to 'dst'.

    If 'dst' is a directory, the file will be moved into it with the same
    name; otherwise, 'src' is just renamed to 'dst'.  Return the new
    full name of the file.

    Handles cross-device moves on Unix using 'copy_file()'.  What about
    other systems???
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno

    if verbose >= 1:
        log.info("moving %s -> %s", src, dst)

    if dry_run:
        return dst

    if not isfile(src):
        raise DistutilsFileError("can't move '%s': not a regular file" % src)

    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' already exists" %
              (src, dst))

    if not isdir(dirname(dst)):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' not a valid path" % \
              (src, dst))

    copy_it = 0
    try:
        os.rename(src, dst)
    except OSError as e:
        num, msg = e.errno, e.strerror
        if num == errno.EXDEV:
            copy_it = 1
        else:
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s': %s" % (src, dst, msg)) 
Example 71
Project: rpm2swidtag   Author: swidtags   File: swidtags.py    Apache License 2.0 4 votes vote down vote up
def run(self):
		if self.opts.swidtagscmd[0] in ( "purge", "sync", "regen" ):
			self.plugin.purge_generated_dir()
			self.plugin.purge_generated_symlink()
		else:
			print("dnf swidtags [sync | purge]")

		if self.opts.swidtagscmd[0] in ( "sync", "regen" ):
			ts = rpm.transaction.initReadOnlyTransaction(root=self.base.conf.installroot)
			pkgs = []
			for p in ts.dbMatch():
				# Filter out imported GPG keys
				if p["arch"]:
					pkgs.append(p)

			dirs = {}
			for r in self.base.repos.iter_enabled():
				if not hasattr(r, "get_metadata_path"):
					continue
				file = r.get_metadata_path(self.plugin.METADATA_TYPE)
				if not file or file == "":
					continue
				s = repodata.Swidtags(None, file)
				tags = s.tags_for_rpm_packages(pkgs)

				remaining_pkgs = []
				for p in pkgs:
					if p not in tags:
						remaining_pkgs.append(p)
						continue
					found = False
					for t in tags[p]:
						logger.debug("Retrieved SWID tag from repodata for %s: %s", get_nevra(p), t.get_tagid())
						x = t.save_to_directory(self.plugin.dir_downloaded)
						dirs[x[0]] = True
						found = True
					if not found:
						remaining_pkgs.append(p)

				pkgs = remaining_pkgs

			for d in dirs:
				self.plugin.create_swidtags_d_symlink(path.basename(d))

			if len(pkgs) > 0:
				run_ret = self.plugin.run_rpm2swidtag_for([ get_nevra(p) for p in pkgs ])
				if run_ret == 0:
					pkgs_missing = {}
					for p in pkgs:
						pkgs_missing[get_checksum(p)] = p
					for f in iglob(path.join(self.plugin.dir_generated, "*-rpm-*.swidtag")):
						m = re.search(r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\.swidtag$', f)
						if m and m.group(1) in pkgs_missing:
							del pkgs_missing[m.group(1)]
					for p in pkgs_missing.values():
						logger.warning("The SWID tag for rpm %s should have been generated but could not be found", get_nevra(p))
				if run_ret == -2:
					logger.warning("The rpm2swidtag_command not configured for the %s plugin.\nSWID tags not generated locally for %d packages.", NAME, len(pkgs)) 
Example 72
Project: mmdetection   Author: open-mmlab   File: robustness_eval.py    Apache License 2.0 4 votes vote down vote up
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):

    assert aggregate in ['benchmark', 'all']

    if prints == 'all':
        prints = ['P', 'mPC', 'rPC']
    elif isinstance(prints, str):
        prints = [prints]
    for p in prints:
        assert p in ['P', 'mPC', 'rPC']

    eval_output = mmcv.load(filename)

    num_distortions = len(list(eval_output.keys()))
    results = np.zeros((num_distortions, 6, 20), dtype='float32')

    for i, distortion in enumerate(eval_output):
        for severity in eval_output[distortion]:
            mAP = [
                eval_output[distortion][severity][j]['ap']
                for j in range(len(eval_output[distortion][severity]))
            ]
            results[i, severity, :] = mAP

    P = results[0, 0, :]
    if aggregate == 'benchmark':
        mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
    else:
        mPC = np.mean(results[:, 1:, :], axis=(0, 1))
    rPC = mPC / P

    print('\nmodel: {}'.format(osp.basename(filename)))
    if 'P' in prints:
        print('{:48} = {:0.3f}'.format('Performance on Clean Data [P] in AP50',
                                       np.mean(P)))
    if 'mPC' in prints:
        print('{:48} = {:0.3f}'.format(
            'Mean Performance under Corruption [mPC] in AP50', np.mean(mPC)))
    if 'rPC' in prints:
        print('{:48} = {:0.1f}'.format(
            'Relative Performance under Corruption [rPC] in %',
            np.mean(rPC) * 100))

    return np.mean(results, axis=2, keepdims=True) 
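The indexing convention above: axis 0 is the distortion type, axis 1 the severity (0 = clean), axis 2 the 20 VOC classes. A shape-only sketch of the aggregation with random stand-in data:

import numpy as np

results = np.random.rand(15, 6, 20).astype('float32')  # hypothetical AP values
P = results[0, 0, :]                               # clean AP per class
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))    # mean over distortions and severities
rPC = mPC / P                                      # relative robustness per class
print(P.shape, mPC.shape, rPC.shape)               # (20,) (20,) (20,)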
Example 73
Project: spleeter   Author: deezer   File: separator.py    MIT License 4 votes vote down vote up
def separate_to_file(
            self, audio_descriptor, destination,
            audio_adapter=get_default_audio_adapter(),
            offset=0, duration=600., codec='wav', bitrate='128k',
            filename_format='{filename}/{instrument}.{codec}',
            synchronous=True):
        """ Performs source separation and export result to file using
        given audio adapter.

        Filename format should be a Python format string that can use the
        following parameters: {instrument}, {filename} and {codec}.

        :param audio_descriptor:    Describe song to separate, used by audio
                                    adapter to retrieve and load audio data,
                                    in case of file based audio adapter, such
                                    descriptor would be a file path.
        :param destination:         Target directory to write output to.
        :param audio_adapter:       (Optional) Audio adapter to use for I/O.
        :param offset:              (Optional) Offset of loaded song.
        :param duration:            (Optional) Duration of loaded song.
        :param codec:               (Optional) Export codec.
        :param bitrate:             (Optional) Export bitrate.
        :param filename_format:     (Optional) Filename format.
        :param synchronous:         (Optional) True if the export should be synchronous.
        """
        waveform, _ = audio_adapter.load(
            audio_descriptor,
            offset=offset,
            duration=duration,
            sample_rate=self._sample_rate)
        sources = self.separate(waveform)
        filename = splitext(basename(audio_descriptor))[0]
        generated = []
        for instrument, data in sources.items():
            path = join(destination, filename_format.format(
                filename=filename,
                instrument=instrument,
                codec=codec))
            if path in generated:
                raise SpleeterError((
                    f'Separated source path conflict: {path}, '
                    'please check your filename format'))
            generated.append(path)
            task = self._pool.apply_async(audio_adapter.save, (
                path,
                data,
                self._sample_rate,
                codec,
                bitrate))
            self._tasks.append(task)
        if synchronous:
            self.join() 
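splitext(basename(...)) is the standard two-step for getting a bare file stem, which then feeds the {filename} slot of the format string. With hypothetical inputs:

from os.path import basename, join, splitext

audio_descriptor = '/music/album/track01.mp3'   # hypothetical input file
filename = splitext(basename(audio_descriptor))[0]
print(join('/tmp/out', '{filename}/{instrument}.{codec}'.format(
    filename=filename, instrument='vocals', codec='wav')))
# /tmp/out/track01/vocals.wav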
Example 74
Project: fleeg-platform   Author: Fleeg   File: utils.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def get_page_info(url):
    page = Article(url)
    page_og = OpenGraph()
    image_url = None
    global_type = None
    page_content = None

    def get_page_head():
        headers = {
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.2 '
                          '(KHTML, like Gecko) Chrome/15.0.874.121 Safari/535.2',
        }

        try:
            resp_headers = requests.head(url, headers=headers).headers
        except requests.exceptions.RequestException:
            raise LinkException('Failed to read link.')
        return resp_headers

    def get_title_from_url():
        name_url = splitext(basename(urlsplit(url).path))[0]
        words = re.findall(r'[a-zA-Z0-9]+', name_url)
        return ' '.join([word.capitalize() for word in words])

    def summary_from_text(txt, size=250):
        return txt[:size] if isinstance(txt, str) and len(txt) > size else txt

    def build_tags(*args):
        tags = reduce(operator.add, args)
        return list(filter(lambda x: bool(x), set(tags)))

    page_type, page_subtype = get_page_head()['Content-Type'].split('/')
    page_subtype = re.findall(r'[a-zA-Z0-9]+', page_subtype)[0]

    if page_type == 'image':
        image_url = url
        global_type = page_type

    if page_type == 'text':
        page.download()
        page_content = page.html

        if page_subtype == 'html':
            page_og = OpenGraph(html=page_content)
            page.parse()

    page_text = page.text or page_content

    return {
        'type': page_og.type or global_type or page_subtype,
        'title': page_og.title or page.title or get_title_from_url(),
        'summary': page_og.description or page.meta_description or summary_from_text(page_text),
        'image': page_og.image or page.meta_img or page.top_image or image_url,
        'tags': build_tags(page.meta_keywords, list(page.tags)),
        'publish_date': page.publish_date or None,
        'text': page_text,
    } 
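get_title_from_url is worth isolating: basename() works on the path component of a URL just as it does on a filesystem path. A standalone version (imports inlined here; the original presumably pulls re, basename, splitext and urlsplit in at module level):

import re
from os.path import basename, splitext
from urllib.parse import urlsplit

def title_from_url(url):
    name_url = splitext(basename(urlsplit(url).path))[0]
    words = re.findall(r'[a-zA-Z0-9]+', name_url)
    return ' '.join(word.capitalize() for word in words)

print(title_from_url('https://example.com/posts/my-first-post.html'))
# My First Post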
Example 75
Project: mealpy   Author: edmundmok   File: venv_update.py    MIT License 4 votes vote down vote up
def ensure_virtualenv(args, return_values):
    """Ensure we have a valid virtualenv."""
    def adjust_options(options, args):
        # TODO-TEST: proper error message with no arguments
        venv_path = return_values.venv_path = args[0]

        if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
            from os.path import abspath, basename, dirname
            options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
        # end of option munging.

        # there are two python interpreters involved here:
        # 1) the interpreter we're instructing virtualenv to copy
        if options.python is None:
            source_python = None
        else:
            source_python = virtualenv.resolve_interpreter(options.python)
        # 2) the interpreter virtualenv will create
        destination_python = venv_python(venv_path)

        if exists(destination_python):
            reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
            if reason:
                info('Removing invalidated virtualenv. (%s)' % reason)
                run(('rm', '-rf', venv_path))
            else:
                info('Keeping valid virtualenv from previous run.')
                raise SystemExit(0)  # looks good! we're done here.

    # this is actually a documented extension point:
    #   http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
    import virtualenv
    virtualenv.adjust_options = adjust_options

    from sys import argv
    argv[:] = ('virtualenv',) + args
    info(colorize(argv))
    raise_on_failure(virtualenv.main)
    # There might not be a venv_path if doing something like "venv= --version"
    # and not actually asking virtualenv to make a venv.
    if return_values.venv_path is not None:
        run(('rm', '-rf', join(return_values.venv_path, 'local'))) 
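The prompt line chains three os.path calls: abspath normalizes, dirname steps out of the venv directory, and basename keeps just the enclosing project folder's name. In isolation (path hypothetical):

from os.path import abspath, basename, dirname

venv_path = '/home/me/project/venv'   # hypothetical venv location
print('(%s)' % basename(dirname(abspath(venv_path))))
# (project)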
Example 76
Project: specio   Author: paris-saclay-cds   File: fsm.py    BSD 3-Clause "New" or "Revised" License 4 votes vote down vote up
def _read_fsm(fsm_file):
            """Read the fsm file.

            Parameters
            ----------
            fsm_file : file object
                The file object in bytes mode.

            Returns
            -------
            spectrum : Spectrum
                Return a Spectrum instance.

            """
            content = fsm_file.read()

            start_byte = 0
            n_bytes = 4
            signature = content[start_byte:start_byte + n_bytes]

            start_byte += n_bytes
            # the description is fixed to 40 bytes
            n_bytes = 40
            description = content[
                start_byte:start_byte + n_bytes].decode('utf8')

            meta = {'signature': signature,
                    'description': description}
            spectrum = []

            while start_byte + n_bytes < len(content):
                # read block info
                start_byte += n_bytes
                n_bytes = 6
                block_id, block_size = _block_info(
                    content[start_byte:start_byte + n_bytes])
                # read the upcoming block
                start_byte += n_bytes
                n_bytes = block_size
                data_extracted = FUNC_DECODE[block_id](
                    content[start_byte:start_byte + n_bytes])
                if isinstance(data_extracted, dict):
                    meta.update(data_extracted)
                else:
                    spectrum.append(data_extracted)

            spectrum = np.squeeze(spectrum)
            # we add a value such that we include the endpoint
            wavelength = np.arange(meta['z_start'],
                                   meta['z_end'] + meta['z_delta'],
                                   meta['z_delta'])
            if isinstance(fsm_file, string_types):
                meta['filename'] = basename(fsm_file)
            else:
                meta['filename'] = basename(fsm_file.name)

            return Spectrum(spectrum, wavelength, meta) 
Example 77
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 4 votes vote down vote up
def _collectFiles(self, checkBuildReason=False, newestInput=None):
        self.javafilelist = []
        self.nonjavafiletuples = []
        self.nonjavafilecount = 0
        buildReason = None
        outputDir = self.subject.output_dir()
        for sourceDir in self.subject.source_dirs():
            for root, _, files in os.walk(sourceDir, followlinks=True):
                javafiles = [join(root, name) for name in files if name.endswith('.java')]
                self.javafilelist += javafiles
                nonjavafiles = [join(root, name) for name in files if not name.endswith('.java')]
                self.nonjavafiletuples += [(sourceDir, nonjavafiles)]
                self.nonjavafilecount += len(nonjavafiles)

                def findBuildReason():
                    for inputs, inputSuffix, outputSuffix in [(javafiles, 'java', 'class'), (nonjavafiles, None, None)]:
                        for inputFile in inputs:
                            if basename(inputFile) == 'package-info.java':
                                continue
                            if inputSuffix:
                                witness = TimeStampFile(outputDir + inputFile[len(sourceDir):-len(inputSuffix)] + outputSuffix)
                            else:
                                witness = TimeStampFile(outputDir + inputFile[len(sourceDir):])
                            if not witness.exists():
                                return witness.path + ' does not exist'
                            if not self._newestOutput or witness.isNewerThan(self._newestOutput):
                                self._newestOutput = witness
                            if witness.isOlderThan(inputFile):
                                return '{} is older than {}'.format(witness, TimeStampFile(inputFile))
                            if newestInput and witness.isOlderThan(newestInput):
                                return '{} is older than {}'.format(witness, newestInput)
                    return None

                if not buildReason and checkBuildReason:
                    buildReason = findBuildReason()
        self.copyfiles = []
        if hasattr(self.subject, 'copyFiles'):
            for depname, copyMap in self.subject.copyFiles.items():
                dep = dependency(depname)
                if not dep.isProject():
                    abort("Unsupported dependency type: " + dep.name)
                deproot = dep.get_output_root()
                if dep.isNativeProject():
                    deproot = join(dep.suite.dir, dep.getOutput())
                for src, dst in copyMap.items():
                    absolute_src = join(deproot, src)
                    absolute_dst = join(outputDir, dst)
                    self.copyfiles += [(absolute_src, absolute_dst)]
                    witness = TimeStampFile(absolute_dst)
                    if not buildReason and checkBuildReason:
                        if not witness.exists():
                            buildReason = witness.path + ' does not exist'
                        if witness.isOlderThan(absolute_src):
                            buildReason = '{} is older than {}'.format(witness, TimeStampFile(absolute_src))
                    if witness.exists() and (not self._newestOutput or witness.isNewerThan(self._newestOutput)):
                        self._newestOutput = witness

        self.javafilelist = sorted(self.javafilelist)  # for reproducibility
        return buildReason 
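The witness computation pairs basename() (to skip package-info.java, which need not produce a .class file) with plain slicing to map a source path onto its expected output path. Traced with hypothetical directories:

sourceDir = '/suite/src'              # hypothetical source root
outputDir = '/suite/mxbuild/bin'      # hypothetical output root
inputFile = '/suite/src/com/acme/Foo.java'
witness = outputDir + inputFile[len(sourceDir):-len('java')] + 'class'
print(witness)
# /suite/mxbuild/bin/com/acme/Foo.class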
Example 78
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 4 votes vote down vote up
def generate_eclipse_workingsets():
    """
    Populate the workspace's working set configuration with working sets generated from project data for the primary suite.
    If the workspace already contains working set definitions, the existing ones will be retained and extended.
    In case mx/env does not contain a WORKSPACE definition pointing to the workspace root directory, a parent search from the primary suite directory is performed.
    If no workspace root directory can be identified, the primary suite directory is used and the user has to place the workingsets.xml file by hand.
    """

    # identify where to look for workingsets.xml
    wsfilename = 'workingsets.xml'
    wsloc = '.metadata/.plugins/org.eclipse.ui.workbench'
    if 'WORKSPACE' in os.environ:
        expected_wsroot = os.environ['WORKSPACE']
    else:
        expected_wsroot = primary_suite().dir

    wsroot = _find_eclipse_wsroot(expected_wsroot)
    if wsroot is None:
        # failed to find it
        wsroot = expected_wsroot

    wsdir = join(wsroot, wsloc)
    if not exists(wsdir):
        wsdir = wsroot
        logv('Could not find Eclipse metadata directory. Please place ' + wsfilename + ' in ' + wsloc + ' manually.')
    wspath = join(wsdir, wsfilename)

    def _add_to_working_set(key, value):
        if key not in workingSets:
            workingSets[key] = [value]
        else:
            workingSets[key].append(value)

    # gather working set info from project data
    workingSets = dict()
    for p in projects():
        if p.workingSets is None:
            continue
        for w in p.workingSets.split(","):
            _add_to_working_set(w, p.name)

    # the mx metadata directories are included in the appropriate working sets
    _add_to_working_set('MX', 'mxtool')
    for suite in suites(True):
        _add_to_working_set('MX', basename(suite.mxDir))

    if exists(wspath):
        wsdoc = _copy_workingset_xml(wspath, workingSets)
    else:
        wsdoc = _make_workingset_xml(workingSets)

    update_file(wspath, wsdoc.xml(newl='\n'))
    return wsroot 
Example 79
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 4 votes vote down vote up
def fsckprojects(args):
    """find directories corresponding to deleted Java projects and delete them"""
    for suite in suites(True, includeBinary=False):
        projectDirs = [p.dir for p in suite.projects]
        distIdeDirs = [d.get_ide_project_dir() for d in suite.dists if d.isJARDistribution() and d.get_ide_project_dir() is not None]
        for dirpath, dirnames, files in os.walk(suite.dir):
            if dirpath == suite.dir:
                # no point in traversing vc metadata dir, lib, .workspace
                # if there are nested source suites, we must not scan them now, as they are not in projectDirs (but they do contain .project files)
                omitted = [suite.mxDir, 'lib', '.workspace', 'mx.imports']
                if suite.vc:
                    omitted.append(suite.vc.metadir())
                dirnames[:] = [d for d in dirnames if d not in omitted]
            elif dirpath == suite.get_output_root():
                # don't want to traverse output dir
                dirnames[:] = []
            elif dirpath == suite.mxDir:
                # don't want to traverse mx.name as it contains a .project
                dirnames[:] = []
            elif dirpath in projectDirs:
                # don't traverse subdirs of an existing project in this suite
                dirnames[:] = []
            elif dirpath in distIdeDirs:
                # don't traverse subdirs of an existing distribution in this suite
                dirnames[:] = []
            else:
                maybe_project = basename(dirpath)
                if not _removedDeps.get(maybe_project):
                    projectConfigFiles = frozenset(['.classpath', '.project', 'nbproject', maybe_project + '.iml'])
                    indicators = projectConfigFiles.intersection(files)
                    if len(indicators) != 0:
                        indicators = [os.path.relpath(join(dirpath, i), suite.vc_dir) for i in indicators]
                        indicatorsInVC = suite.vc.locate(suite.vc_dir, indicators)
                        # Only proceed if there are indicator files that are not under VC
                        if len(indicators) > len(indicatorsInVC):
                            if ask_yes_no(dirpath + ' looks like a removed project -- delete it', 'n'):
                                shutil.rmtree(dirpath)
                                log('Deleted ' + dirpath)
        ideaProjectDirectory = join(suite.dir, '.idea')
        librariesDirectory = join(ideaProjectDirectory, 'libraries')
        if exists(librariesDirectory):
            neededLibraries = set()
            unique_library_file_names = set()
            for p in suite.projects_recursive() + _mx_suite.projects_recursive():
                if not p.isJavaProject():
                    continue
                def processDep(dep, edge):
                    if dep is p:
                        return
                    if dep.isLibrary() or dep.isJARDistribution() or dep.isJdkLibrary() or dep.isMavenProject() or dep.isClasspathDependency():
                        neededLibraries.add(dep)
                p.walk_deps(visit=processDep, ignoredEdges=[DEP_EXCLUDED])
            neededLibraryFiles = frozenset([_intellij_library_file_name(l.name, unique_library_file_names) for l in neededLibraries])
            existingLibraryFiles = frozenset(os.listdir(librariesDirectory))
            for library_file in existingLibraryFiles - neededLibraryFiles:
                file_path = join(librariesDirectory, library_file)
                relative_file_path = os.path.relpath(file_path, os.curdir)
                if ask_yes_no(relative_file_path + ' looks like a removed library -- delete it', 'n'):
                    os.remove(file_path)
                    log('Deleted ' + relative_file_path) 
Example 80
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 4 votes vote down vote up
def sclone(args):
    """clone a suite repository, and its imported suites"""
    parser = ArgumentParser(prog='mx sclone')
    parser.add_argument('--source', help='url/path of repo containing suite', metavar='<url>')
    parser.add_argument('--subdir', help='sub-directory containing the suite in the repository (suite name)')
    parser.add_argument('--dest', help='destination directory (default basename of source)', metavar='<path>')
    parser.add_argument('--revision', help='revision to checkout')
    parser.add_argument("--no-imports", action='store_true', help='do not clone imported suites')
    parser.add_argument("--kind", help='vc kind for URL suites', default='hg')
    parser.add_argument('--ignore-version', action='store_true', help='ignore version mismatch for existing suites')
    parser.add_argument('nonKWArgs', nargs=REMAINDER, metavar='source [dest]...')
    args = parser.parse_args(args)

    warn("The sclone command is deprecated and is scheduled for removal.")

    # check for non keyword args
    if args.source is None:
        args.source = _kwArg(args.nonKWArgs)
    if args.dest is None:
        args.dest = _kwArg(args.nonKWArgs)
    if len(args.nonKWArgs) > 0:
        abort('unrecognized args: ' + ' '.join(args.nonKWArgs))

    if args.source is None:
        # must be primary suite and dest is required
        if primary_suite() is None:
            abort('--source missing and no primary suite found')
        if args.dest is None:
            abort('--dest required when --source is not given')
        source = primary_suite().vc_dir
        if source != primary_suite().dir:
            subdir = os.path.relpath(source, primary_suite().dir)
            if args.subdir and args.subdir != subdir:
                abort("--subdir should be '{}'".format(subdir))
            args.subdir = subdir
    else:
        source = args.source

    if args.dest is not None:
        dest = args.dest
    else:
        dest = basename(source.rstrip('/'))
        if dest.endswith('.git'):
            dest = dest[:-len('.git')]

    dest = os.path.abspath(dest)
    dest_dir = join(dest, args.subdir) if args.subdir else dest
    source = mx_urlrewrites.rewriteurl(source)
    vc = vc_system(args.kind)
    vc.clone(source, dest=dest)
    mxDir = _is_suite_dir(dest_dir)
    if not mxDir:
        warn("'{}' is not an mx suite".format(dest_dir))
        return
    _discover_suites(mxDir, load=False, register=False)
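The default destination logic is a common URL-to-directory idiom: basename() happily treats a URL as a slash-separated path, and rstrip('/') guards against trailing slashes. For example:

from os.path import basename

for source in ('https://github.com/graalvm/mx.git', '/repos/graal/'):
    dest = basename(source.rstrip('/'))
    if dest.endswith('.git'):
        dest = dest[:-len('.git')]
    print(dest)
# mx
# graal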