Python os.path.commonprefix() Examples

The following are 30 code examples of os.path.commonprefix(), drawn from open-source projects. The project and source file for each example are noted above it, so you can follow up in the original source. You may also want to check out the other functions and classes available in the os.path module.
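Before the project examples, a quick look at the function's basic behaviour (a minimal sketch; the strings are just sample data):

from os.path import commonprefix

print(commonprefix(['interspecies', 'interstellar', 'interstate']))  # 'inters'
print(commonprefix(['/usr/lib', '/usr/local/lib']))  # '/usr/l' -- a string prefix, not necessarily a valid path
print(commonprefix([]))  # '' -- an empty list yields the empty string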
Example #1
Source File: util.py    From android_universal with MIT License
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
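_common_shorten_repr comes from CPython's unittest.util, which several of the projects below vendor unchanged (that is why Examples #2 and #4-#8 repeat it verbatim); the underscore constants (_MAX_LENGTH, _MIN_BEGIN_LEN, and so on) are module-level limits defined next to it. A stripped-down sketch of the idea, with made-up limits in place of the real constants:

from os.path import commonprefix

def shorten_shared_prefix(a, b, keep=8, placeholder='[...]'):
    # When two long reprs share a long prefix, keep only the first `keep`
    # characters of the shared part and mark the elision with a placeholder,
    # so the differing tails stay visible in an assertion message.
    prefix = commonprefix([a, b])
    if len(prefix) <= keep + len(placeholder):
        return a, b
    head = prefix[:keep] + placeholder
    return head + a[len(prefix):], head + b[len(prefix):]

print(shorten_shared_prefix('x' * 40 + 'apple', 'x' * 40 + 'banana'))
# ('xxxxxxxx[...]apple', 'xxxxxxxx[...]banana')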
Example #2
Source File: util.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #3
Source File: zfs-snapshot-disk-usage-matrix.py    From zfs-snapshot-disk-usage-matrix with MIT License
def write_snapshot_disk_usage_matrix(filesystem, suppress_common_prefix=True):
    snapshot_names = snapshots_in_creation_order(filesystem, strip_filesystem=True)
    if suppress_common_prefix:
        suppressed_prefix_len = len(commonprefix(snapshot_names))
    else:
        suppressed_prefix_len = 0
    print_csv([[None]+[name[suppressed_prefix_len:] for name in snapshot_names]]) # Start with Column headers
    for end in range(len(snapshot_names)):
        this_line = [snapshot_names[end][suppressed_prefix_len:]]
        for start in range(len(snapshot_names)):
            if start <= end:
                start_snap = snapshot_names[start]
                end_snap = snapshot_names[end]
                space_used = space_between_snapshots(filesystem,
                                                     start_snap,
                                                     end_snap)
                this_line.append(space_used)
            else:
                this_line.append(None)
        ## Show line we've just done
        print_csv([this_line]) 
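Stripping the shared snapshot-name prefix only affects the labels in the CSV, not the space accounting; a small illustration with invented snapshot names:

from os.path import commonprefix

names = ['tank@backup-jan', 'tank@backup-feb', 'tank@backup-mar']
cut = len(commonprefix(names))
print([n[cut:] for n in names])  # ['jan', 'feb', 'mar']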
Example #4
Source File: util.py    From odoo13-x64 with GNU General Public License v3.0
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #5
Source File: util.py    From Project-New-Reign---Nemesis-Main with GNU General Public License v3.0
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #6
Source File: util.py    From Fluid-Designer with GNU General Public License v3.0
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #7
Source File: util.py    From Imogen with MIT License
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #8
Source File: util.py    From ironpython3 with Apache License 2.0
def _common_shorten_repr(*args):
    args = tuple(map(safe_repr, args))
    maxlen = max(map(len, args))
    if maxlen <= _MAX_LENGTH:
        return args

    prefix = commonprefix(args)
    prefixlen = len(prefix)

    common_len = _MAX_LENGTH - \
                 (maxlen - prefixlen + _MIN_BEGIN_LEN + _PLACEHOLDER_LEN)
    if common_len > _MIN_COMMON_LEN:
        assert _MIN_BEGIN_LEN + _PLACEHOLDER_LEN + _MIN_COMMON_LEN + \
               (maxlen - prefixlen) < _MAX_LENGTH
        prefix = _shorten(prefix, _MIN_BEGIN_LEN, common_len)
        return tuple(prefix + s[prefixlen:] for s in args)

    prefix = _shorten(prefix, _MIN_BEGIN_LEN, _MIN_COMMON_LEN)
    return tuple(prefix + _shorten(s[prefixlen:], _MIN_DIFF_LEN, _MIN_END_LEN)
                 for s in args) 
Example #9
Source File: tool_utils.py    From nototools with Apache License 2.0
def commonpathprefix(paths):
    """Return the common path prefix and a tuple of the relative subpaths for the
  provided paths.  Uses resolve_path to convert to absolute paths and returns
  the common absolute path.  Some subpaths might be the empty string and joining
  these will produce paths ending in '/', use normpath if you don't want this.

  Python 2.7 only has path.commonprefix, which returns a common string prefix,
  not a common path prefix.
  """
    norm_paths = [resolve_path(p) + path.sep for p in paths]
    prefix = path.dirname(path.commonprefix(norm_paths))
    prefix_len = len(prefix)
    if prefix_len > 1:
        prefix_len += 1  # not '/' so does not end in '/', strip from subpaths
    subpaths = tuple(p[prefix_len:-1] for p in norm_paths)
    return prefix, subpaths 
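The pitfall the docstring describes is easy to reproduce; appending path.sep before calling commonprefix (as above) and then taking dirname is what turns the character-level result back into a real directory. A minimal sketch with sample paths:

from os import path

paths = ['/data/projects/alpha/readme.md', '/data/projects/alps/notes.txt']
print(path.commonprefix(paths))  # '/data/projects/alp' -- cut mid-component, not a path boundary
print(path.dirname(path.commonprefix(paths)))  # '/data/projects' -- a usable path prefix

Python 3.4+ also offers os.path.commonpath, which works on path components directly.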
Example #10
Source File: PathLib.py    From PyFlow with Apache License 2.0
def commonprefix(path=("StringPin", [])):
        '''Return the longest path prefix (taken character-by-character) that is a prefix of all paths in list. If list is empty, return the empty string (''). Note that this may return invalid paths because it works a character at a time.'''
        return osPath.commonprefix(path) 
Example #11
Source File: util_fname.py    From netharn with Apache License 2.0
def dumpsafe(paths, repl='<sl>'):
    """
    Enforces that filenames will not conflict.
    Removes the common prefix and replaces slashes with <sl>.

    Ignore:
        >>> # xdoctest: +REQUIRES(--pygtrie)
        >>> paths = ['foo/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]
        >>> list(dumpsafe(paths, '-'))
    """
    common_pref = commonprefix(paths)
    if not isdir(common_pref):
        im_pref = dirname(common_pref)
        if common_pref[len(im_pref):len(im_pref) + 1] == '/':
            im_pref += '/'
        elif common_pref[len(im_pref):len(im_pref) + 1] == '\\':
            im_pref += '\\'
    else:
        im_pref = common_pref

    start = len(im_pref)
    dump_paths = (
        p[start:].replace('/', repl).replace('\\', repl)  # faster
        # relpath(p, im_pref).replace('/', repl).replace('\\', repl)
        for p in paths
    )
    return dump_paths 
Example #12
Source File: us_ofac.py    From opensanctions with MIT License
def date_common_prefix(*dates):
    prefix = commonprefix(dates)[:10]
    if len(prefix) < 10:
        prefix = prefix[:7]
    if len(prefix) < 7:
        prefix = prefix[:4]
    if len(prefix) < 4:
        prefix = None
    return prefix 
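Assuming the inputs are ISO-8601 date strings (the snippet itself does not show them), the function keeps only as much of the date as every value agrees on:

# Hypothetical calls, tracing the code above:
#   date_common_prefix('1969-07-16', '1969-07-24')  ->  '1969-07'  (same year and month)
#   date_common_prefix('1969-07-16', '1969-11-14')  ->  '1969'     (same year only)
#   date_common_prefix('1969-07-16', '1972-12-07')  ->  None       (nothing in common)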
Example #13
Source File: s3.py    From cli with MIT License
def purge_cloudfront(bucket, paths: List[str]) -> None:
    """
    Invalidate any CloudFront distribution paths which match the given list of
    file paths originating in the given S3 bucket.
    """
    cloudfront = aws.client_with_default_region("cloudfront")

    # Find the common prefix of this fileset, if any.
    prefix = commonprefix(paths)

    # For each CloudFront distribution origin serving from this bucket (with a
    # matching or broader prefix), if any, purge the prefix path.
    for distribution, origin in distribution_origins_for_bucket(cloudfront, bucket.name, prefix):
        purge_prefix(cloudfront, distribution, origin, prefix) 
Example #14
Source File: scripts.py    From ipymd with BSD 3-Clause "New" or "Revised" License
def _common_root(files):
    files = [op.realpath(file) for file in files]
    root = op.commonprefix(files)
    if not op.exists(root):
        root = op.dirname(root)
    if root:
        assert op.exists(root)
        assert op.isdir(root), root
    return root 
Example #15
Source File: util.py    From nidaba with GNU General Public License v2.0
def _group_by_prefix(data, prefixes):
    """
    Groups a list of input files by longest common prefix over a given list of prefixes
    """
    ret = [[] for _ in prefixes]
    for doc in data:
        ret[sorted(enumerate(commonprefix([doc[1], pre[1]]) for pre in prefixes),
                   key=lambda x: len(x[1]))[-1][0]].append(doc)
    return ret 
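Both data and prefixes appear to be sequences of pairs whose second element is the compared string; under that assumption, a usage sketch:

prefixes = [('a', 'scan_001'), ('b', 'scan_002')]
data = [('x', 'scan_001_page1.tif'),
        ('y', 'scan_002_page1.tif'),
        ('z', 'scan_001_page2.tif')]
# _group_by_prefix(data, prefixes) would return
# [[('x', 'scan_001_page1.tif'), ('z', 'scan_001_page2.tif')],
#  [('y', 'scan_002_page1.tif')]]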
Example #16
Source File: pdf_scraper.py    From magpie with MIT License
def scrape(file_path):
    pdf = PdfFileReader(file(file_path, 'rb'))
    content = ''
    for i in range(0, pdf.numPages):
        # TODO figure out why line breaks don't seem to be rendering
        content += pdf.getPage(i).extractText() + '\n'
    out_file = '.%s' % path.basename(file_path)
    out_path = path.join(path.dirname(file_path), out_file)
    if options.repo not in path.commonprefix([out_path, options.repo]):
        out_path = path.join(options.repo, options.default_notebook, out_file)
    content = content.encode('ascii', 'ignore')
    f = open(out_path, 'w')
    f.write(content)
    f.close() 
Example #17
Source File: _lcsuffix.py    From abydos with GNU General Public License v3.0
def lcsuffix(self, strings: List[str]) -> str:
        """Return the longest common suffix of a list of strings.

        Longest common suffix (LCSuffix).

        Parameters
        ----------
        strings : list of strings
            Strings for comparison

        Returns
        -------
        str
            The longest common suffix

        Examples
        --------
        >>> sfx = LCSuffix()
        >>> sfx.lcsuffix(['cat', 'hat'])
        'at'
        >>> sfx.lcsuffix(['Niall', 'Neil'])
        'l'
        >>> sfx.lcsuffix(['aluminum', 'Catalan'])
        ''
        >>> sfx.lcsuffix(['ATCG', 'TAGC'])
        ''


        .. versionadded:: 0.4.0

        """
        strings = [s[::-1] for s in strings]
        return cast(str, commonprefix(strings)[::-1]) 
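The reverse-then-commonprefix trick works just as well outside the class; a minimal standalone sketch:

from os.path import commonprefix

def longest_common_suffix(strings):
    # Reverse every string, take the common prefix, then reverse the result back.
    return commonprefix([s[::-1] for s in strings])[::-1]

assert longest_common_suffix(['cat', 'hat']) == 'at'
assert longest_common_suffix(['ATCG', 'TAGC']) == ''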
Example #18
Source File: _lcprefix.py    From abydos with GNU General Public License v3.0
def lcprefix(self, strings: List[str]) -> str:
        """Return the longest common prefix of a list of strings.

        Longest common prefix (LCPrefix).

        Parameters
        ----------
        strings : list of strings
            Strings for comparison

        Returns
        -------
        str
            The longest common prefix

        Examples
        --------
        >>> pfx = LCPrefix()
        >>> pfx.lcprefix(['cat', 'hat'])
        ''
        >>> pfx.lcprefix(['Niall', 'Neil'])
        'N'
        >>> pfx.lcprefix(['aluminum', 'Catalan'])
        ''
        >>> pfx.lcprefix(['ATCG', 'TAGC'])
        ''


        .. versionadded:: 0.4.0

        """
        return cast(str, commonprefix(strings)) 
Example #19
Source File: exampleinclude.py    From airflow with Apache License 2.0
def doctree_read(app, doctree):
    """
    Reads the documentation tree for the application and registers sources in the generated documentation.

    :param app: application
    :param doctree: documentation tree

    :return: None

    """
    env = app.builder.env
    if not hasattr(env, "_viewcode_modules"):
        env._viewcode_modules = {}  # type: ignore

    if app.builder.name == "singlehtml":
        return

    for objnode in doctree.traverse(ExampleHeader):
        filepath = objnode.get("filename")
        relative_path = path.relpath(
            filepath, path.commonprefix([app.config.exampleinclude_sourceroot, filepath])
        )
        modname = relative_path.replace("/", ".")[:-3]
        show_button = register_source(app, env, modname)
        onlynode = create_node(env, relative_path, show_button)

        objnode.replace_self(onlynode)
# pylint: enable=protected-access 
Example #20
Source File: getshark.py    From ibeis with Apache License 2.0
def postprocess_filenames(parsed, download_dir):
    from os.path import commonprefix, basename  # NOQA
    # Create a new filename
    parsed['new_fpath'] = [join(download_dir, _fname)
                           for _fname in parsed['new_fname']]
    # Remember the original filename
    prefix = commonprefix(parsed['img_url'])
    parsed['orig_fname'] = [url_[len(prefix):] for url_ in parsed['img_url']]
    # Parse out the extension
    parsed['ext'] = [splitext(_fname)[-1] for _fname in parsed['new_fname']]
    return parsed 
Example #21
Source File: lippupiste.py    From linkedevents with MIT License
def _update_superevent_details(self, super_event):
        events = super_event.sub_events.filter(deleted=False)
        if not events.exists():
            return
        # values that should be updated in super-event
        update_dict = {}
        # name superevent by common part of the subevent names
        names = {}
        if not super_event.is_user_edited():
            # only save common name if super event has not been named by user
            # some sub events may still have user edited names, use them if present
            for lang in settings.LANGUAGES:
                lang = lang[0]
                names[lang] = []
                lang = lang.replace('-', '_')
                if any([getattr(subevent, 'name_'+lang) for subevent in events]):
                    names[lang].extend([getattr(subevent, 'name_'+lang) for
                                        subevent in events if getattr(subevent, 'name_'+lang, None)])
            super_event_name = {}
            for lang in settings.LANGUAGES:
                lang = lang[0]
                super_event_name[lang] = commonprefix(names[lang]).strip(' -:')
                lang = lang.replace('-', '_')
                if any([getattr(subevent, 'name_'+lang) for subevent in events]):
                    update_dict['name_'+lang] = super_event_name[lang]
        # always update super_event timeframe depending on sub events
        first_event = events.order_by('start_time').first()
        update_dict['start_time'] = first_event.start_time
        update_dict['has_start_time'] = first_event.has_start_time

        last_event = events.order_by('-end_time').first()
        update_dict['end_time'] = last_event.end_time
        update_dict['has_end_time'] = last_event.has_end_time

        if any([value != getattr(super_event, key) for key, value in update_dict.items()]):
            # if something changed, update
            for key, value in update_dict.items():
                setattr(super_event, key, value)
            super_event.save() 
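With invented sub-event names, the common-prefix naming plus the strip(' -:') cleanup behaves like this:

from os.path import commonprefix

names = ['Madama Butterfly - 1st show', 'Madama Butterfly - 2nd show']
print(commonprefix(names).strip(' -:'))  # 'Madama Butterfly'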
Example #22
Source File: partial_binary.py    From E-Safenet with GNU General Public License v2.0
def find_binary_key(text):
	r = text[512:]

	chunks = [r[x:x+512] for x in range(0,len(r),512)]
	store = [None]*512
	for i in range(512):
		store[i] = {}

	for offset in range(512):
		ochunks = [x[offset:] for x in chunks]
		ochunks.sort()

		for s1, s2 in pairwise(ochunks):
			pfx = commonprefix([s1,s2])
			if len(pfx)>16:
				skip = False
				for ofs in store:
					for stored in ofs.keys():
						if pfx in stored:
							skip = True
							break

				if not skip:			
					store[offset][pfx] = store[offset].get(pfx, 0) + 1
					for ofs in range(len(store)):
						for stored in store[ofs].keys():
							if len(stored) < len(pfx) and stored in pfx:
								del(store[ofs][stored])

	key = ['\0']*512
	i = 0

	order = [None]*512

	for ofs in range(len(store)):
		for k in store[ofs].keys():
			if order[len(k)-1] == None:
				order[len(k)-1] = []
			order[len(k)-1].append([k, ofs])

	for o in order:
		if o != None:
			for k, offset in o:
				key[offset:offset+len(k)] = k

	arr = [ord(l) for l in key]
	return arr 
Example #23
Source File: stringable.py    From kubeface with Apache License 2.0
def prefixes(self, max_prefixes=1, **fields_dict):
        for (key, value) in fields_dict.items():
            assert key in self.field_names, key
            assert value is None or isinstance(value, list), type(value)

        def make_prefixes(
                template_pieces,
                max_prefixes=max_prefixes,
                fields_dict=fields_dict):
            result = [[]]
            if not template_pieces:
                return result

            (literal, field_name) = template_pieces[0]
            if literal:
                for piece in result:
                    piece.append(literal)

            values = fields_dict.get(field_name)
            if values is None:
                values = self.valid_values.get(field_name)
            if values is not None:
                if len(result) * len(values) > max_prefixes:
                    common_prefix = commonprefix(values)
                    for piece in result:
                        piece.append(common_prefix)
                else:
                    new_result = []
                    for value in values:
                        new_fields_dict = dict(fields_dict)
                        new_fields_dict[field_name] = [value]
                        rest = make_prefixes(
                            template_pieces[1:],
                            max_prefixes=max_prefixes / (
                                len(result) * len(values)),
                            fields_dict=new_fields_dict)
                        for some_rest in rest:
                            new_result.extend(
                                [x + [value] + some_rest for x in result])
                    result = new_result
            return result

        prefix_components = make_prefixes(self.template_pieces)
        assert len(prefix_components) <= max_prefixes
        return [''.join(x) for x in prefix_components] 
Example #24
Source File: regexopt.py    From syntax-highlighting with GNU Affero General Public License v3.0
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren 
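The role of commonprefix here is to factor a shared literal out of the alternation before recursing on the remainders; a minimal sketch of that single step, using re.escape in place of the module's own escape helper:

import re
from os.path import commonprefix

words = sorted(['foreach', 'foreign', 'forest'])
prefix = commonprefix(words)  # 'fore'
tails = [re.escape(w[len(prefix):]) for w in words]
pattern = re.escape(prefix) + '(?:' + '|'.join(tails) + ')'
print(pattern)  # fore(?:ach|ign|st)
assert all(re.fullmatch(pattern, w) for w in words)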
Example #25
Source File: regexopt.py    From android_universal with MIT License
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren 
Example #26
Source File: regexopt.py    From diaphora with GNU Affero General Public License v3.0
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return make_charset(oneletter)
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren 
Example #27
Source File: regexopt.py    From V1EngineeringInc-Docs with Creative Commons Attribution Share Alike 4.0 International
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren 
Example #28
Source File: regexopt.py    From Carnets with BSD 3-Clause "New" or "Revised" License
def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren 
Example #29
Source File: CommandLineWidget.py    From topydo with GNU General Public License v3.0
def _complete(self):
        """
        Main completion function.

        Gets list of potential completion candidates for currently edited word,
        completes it to the longest common part, and shows convenient completion
        widget (if multiple completions are returned) with currently selected
        candidate highlighted.
        """
        def find_word_start(p_text, p_pos):
            """ Returns position of the beginning of a word ending in p_pos. """
            return p_text.lstrip().rfind(' ', 0, p_pos) + 1

        def get_word_before_pos(p_text, p_pos):
            start = find_word_start(p_text, p_pos)

            return (p_text[start:p_pos], start)

        pos = self.edit_pos
        text = self.edit_text
        completer = self.completer

        word_before_cursor, start = get_word_before_pos(text, pos)
        completions = completer.get_completions(word_before_cursor, start == 0)
        # store slices before and after place for completion
        self._surrounding_text = (text[:start], text[pos:])

        single_completion = len(completions) == 1
        completion_done = single_completion and completions[0] == word_before_cursor

        if completion_done or not completions:
            self.completion_mode = False
            return
        elif single_completion:
            replacement = completions[0]
        else:
            replacement = commonprefix(completions)
            zero_candidate = replacement if replacement else word_before_cursor

            if zero_candidate != completions[0]:
                completions.insert(0, zero_candidate)

            self.completion_box.add_completions(completions)

        self.insert_completion(replacement)
        self.completion_mode = not single_completion 
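commonprefix is what lets the completion code expand the typed word to the longest unambiguous candidate before showing the menu; a minimal illustration with made-up candidates:

from os.path import commonprefix

candidates = ['depri', 'del', 'delete']
print(commonprefix(candidates))  # 'de' -- expand this far, then list the candidates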
Example #30
Source File: net_plotter.py    From loss-landscape with MIT License
def name_direction_file(args):
    """ Name the direction file that stores the random directions. """

    if args.dir_file:
        assert exists(args.dir_file), "%s does not exist!" % args.dir_file
        return args.dir_file

    dir_file = ""

    file1, file2, file3 = args.model_file, args.model_file2, args.model_file3

    # name for xdirection
    if file2:
        # 1D linear interpolation between two models
        assert exists(file2), file2 + " does not exist!"
        if file1[:file1.rfind('/')] == file2[:file2.rfind('/')]:
            # model_file and model_file2 are under the same folder
            dir_file += file1 + '_' + file2[file2.rfind('/')+1:]
        else:
            # model_file and model_file2 are under different folders
            prefix = commonprefix([file1, file2])
            prefix = prefix[0:prefix.rfind('/')]
            dir_file += file1[:file1.rfind('/')] + '_' + file1[file1.rfind('/')+1:] + '_' + \
                       file2[len(prefix)+1: file2.rfind('/')] + '_' + file2[file2.rfind('/')+1:]
    else:
        dir_file += file1

    dir_file += '_' + args.dir_type
    if args.xignore:
        dir_file += '_xignore=' + args.xignore
    if args.xnorm:
        dir_file += '_xnorm=' + args.xnorm

    # name for ydirection
    if args.y:
        if file3:
            assert exists(file3), "%s does not exist!" % file3
            if file1[:file1.rfind('/')] == file3[:file3.rfind('/')]:
               dir_file += file3
            else:
               # model_file and model_file3 are under different folders
               dir_file += file3[:file3.rfind('/')] + '_' + file3[file3.rfind('/')+1:]
        else:
            if args.yignore:
                dir_file += '_yignore=' + args.yignore
            if args.ynorm:
                dir_file += '_ynorm=' + args.ynorm
            if args.same_dir: # ydirection is the same as xdirection
                dir_file += '_same_dir'

    # index number
    if args.idx > 0: dir_file += '_idx=' + str(args.idx)

    dir_file += ".h5"

    return dir_file