Python itertools.groupby() Examples

The following are 30 code examples of itertools.groupby(). Each example is taken from an open-source project; you can go to the original project or source file by following the link above each example. You may also want to check out all available functions/classes of the itertools module.
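As a quick refresher before the examples: groupby() only groups consecutive items that share a key, so the input is usually sorted by that same key first. A minimal, self-contained sketch (not taken from any of the projects below):

from itertools import groupby

words = ["apple", "avocado", "banana", "blueberry", "cherry"]
# groupby only merges adjacent items, so sort by the key first
for letter, group in groupby(sorted(words), key=lambda w: w[0]):
    print(letter, list(group))
# a ['apple', 'avocado']
# b ['banana', 'blueberry']
# c ['cherry']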
Example #1
Source File: rdd.py    From pyspark-cassandra with Apache License 2.0
def _spanning_iterator(self):
        ''' implements basic spanning on the python side operating on Rows '''
        # TODO implement in Java and support not only Rows

        columns = set(str(c) for c in self.columns)

        def spanning_iterator(partition):
            def key_by(columns):
                for row in partition:
                    k = Row(**{c: row.__getattr__(c) for c in columns})
                    for c in columns:
                        del row[c]

                    yield (k, row)

            for g, l in groupby(key_by(columns), itemgetter(0)):
                yield g, list(_[1] for _ in l)

        return spanning_iterator 
Example #2
Source File: events_find.py    From NeuroKit with MIT License
def _events_find(event_channel, threshold="auto", threshold_keep="above"):
    binary = signal_binarize(event_channel, threshold=threshold)

    if threshold_keep.lower() != "above":
        binary = np.abs(binary - 1)  # Reverse if events are below

    # Initialize data
    events = {"onset": [], "duration": []}

    index = 0
    for event, group in itertools.groupby(binary):
        duration = len(list(group))
        if event == 1:
            events["onset"].append(index)
            events["duration"].append(duration)
        index += duration

    # Convert to array
    events["onset"] = np.array(events["onset"])
    events["duration"] = np.array(events["duration"])
    return events 
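The loop above is essentially a run-length encoding of the binarized signal. A tiny standalone sketch of the same idea, with made-up data (not from the NeuroKit source):

import itertools
import numpy as np

binary = np.array([0, 0, 1, 1, 1, 0, 1, 1])
index = 0
for value, run in itertools.groupby(binary):
    duration = len(list(run))
    if value == 1:
        print("onset", index, "duration", duration)  # onset 2 duration 3, onset 6 duration 2
    index += duration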
Example #3
Source File: dataset.py    From small_norb with MIT License
def group_dataset_by_category_and_instance(self, dataset_split):
        """
        Group the small NORB dataset by (category, instance) key
        
        Parameters
        ----------
        dataset_split: str
            Dataset split, can be either 'train' or 'test'

        Returns
        -------
        groups: list
            List of 25 groups of 972 elements each. All examples of each group are
            from the same category and instance
        """
        if dataset_split not in ['train', 'test']:
            raise ValueError('Dataset split "{}" not allowed.'.format(dataset_split))

        groups = []
        for key, group in groupby(iterable=sorted(self.data[dataset_split]),
                                  key=lambda x: (x.category, x.instance)):
            groups.append(list(group))

        return groups 
Example #4
Source File: pre_submission.py    From MPContribs with MIT License
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to the elements in compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = pd.np.array(re.sub(r"([A-Z])", r" \1", compstr).split()).astype(str)
    sample = ["".join(g) for _, g in groupby(sample, str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]
    return samp_new 
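For illustration, a hypothetical call (this input string is made up and does not appear in the MPContribs source):

# assumed example composition, chosen only to show the behaviour
print(add_comp_one("La0.6Sr0.4MnO3"))  # expected: 'La0.6Sr0.4Mn1O3'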
Example #5
Source File: views.py    From MPContribs with MIT License
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to the elements in compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = re.sub(r"([A-Z])", r" \1", compstr).split()
    sample = ["".join(g) for _, g in groupby(str(sample), str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]

    return samp_new 
Example #6
Source File: common_slow.py    From pyscf with Apache License 2.0
def format_mask(x):
    """
    Formats a mask into a readable string.
    Args:
        x (ndarray): an array with the mask;

    Returns:
        A readable string with the mask.
    """
    x = numpy.asanyarray(x)
    if len(x) == 0:
        return "(empty)"
    if x.dtype == bool:
        x = numpy.argwhere(x)[:, 0]
    grps = tuple(list(g) for _, g in groupby(x, lambda n, c=count(): n-next(c)))
    return ",".join("{:d}-{:d}".format(i[0], i[-1]) if len(i) > 1 else "{:d}".format(i[0]) for i in grps) 
Example #7
Source File: gen_command_json.py    From avrae with GNU General Public License v3.0
def main(out='commands.json'):
    from dbot import bot
    modules = []

    # helpers
    no_category = '\u200bUncategorized'

    def get_category(command):
        cog = command.cog
        return cog.qualified_name if cog is not None else no_category

    # build an iterator of (category, commands)
    iterator = filter(lambda c: not c.hidden, bot.commands)
    filtered = sorted(iterator, key=get_category)
    to_iterate = itertools.groupby(filtered, key=get_category)

    # add modules to output
    for module, commands in to_iterate:
        modules.append(parse_module(module, commands))

    with open(out, 'w') as f:
        json.dump({
            "modules": modules
        }, f) 
Example #8
Source File: rules.py    From pydfs-lineup-optimizer with MIT License
def apply(self, solver, players_dict):
        optimizer = self.optimizer
        extra_positions = optimizer.players_with_same_position
        positions_combinations = set([tuple(sorted(player.positions)) for player in players_dict.keys()
                                      if len(player.positions) > 1])
        for rank, rank_positions in groupby(optimizer.settings.positions, lambda pos: pos.for_rank):
            positions = get_positions_for_optimizer(list(rank_positions), positions_combinations)
            unique_positions = optimizer.available_positions
            players_by_positions = {
                position: {variable for player, variable in players_dict.items()
                           if player.rank == rank and position in player.positions} for position in unique_positions
            }
            for position, places in positions.items():
                extra = 0
                if len(position) == 1:
                    extra = extra_positions.get(position[0], 0)
                players_with_position = set()
                for pos in position:
                    players_with_position.update(players_by_positions[pos])
                solver.add_constraint(players_with_position, None, SolverSign.GTE, places + extra) 
Example #9
Source File: table.py    From pdfplumber with MIT License
def merge_edges(edges, snap_tolerance, join_tolerance):
    """
    Using the `snap_edges` and `join_edge_group` methods above, merge a list of edges into a more "seamless" list.
    """
    def get_group(edge):
        if edge["orientation"] == "h":
            return ("h", edge["top"])
        else:
            return ("v", edge["x0"])

    if snap_tolerance > 0:
        edges = snap_edges(edges, snap_tolerance)

    if join_tolerance > 0:
        _sorted = sorted(edges, key=get_group)
        edge_groups = itertools.groupby(_sorted, key=get_group)
        edge_gen = (join_edge_group(items, k[0], join_tolerance)
            for k, items in edge_groups)
        edges = list(itertools.chain(*edge_gen))
    return edges 
Example #10
Source File: command_line.py    From ivona-speak with MIT License
def list_voices(access_key, secret_key, voice_language, voice_gender):
    """List available Ivona voices"""
    try:
        ivona_api = IvonaAPI(access_key, secret_key)
    except (ValueError, IvonaAPIException) as e:
        raise click.ClickException("Something went wrong: {}".format(repr(e)))

    click.echo("Listing available voices...")

    voices_list = ivona_api.get_available_voices(
        language=voice_language,
        gender=voice_gender,
    )

    # Group voices by language
    voices_dict = dict()
    data = sorted(voices_list, key=lambda x: x['Language'])
    for k, g in groupby(data, key=lambda x: x['Language']):
        voices_dict[k] = list(g)

    for ln, voices in voices_dict.items():
        voice_names = [v['Name'] for v in voices]
        click.echo("{}: {}".format(ln, ', '.join(voice_names)))

    click.secho("All done", fg='green') 
Example #11
Source File: utils.py    From pdfplumber with MIT License
def cluster_objects(objs, attr, tolerance):
    if isinstance(attr, (str, int)):
        attr_getter = itemgetter(attr)
    else:
        attr_getter = attr
    objs = to_list(objs)
    values = map(attr_getter, objs)
    cluster_dict = make_cluster_dict(values, tolerance)

    get_0, get_1 = itemgetter(0), itemgetter(1)

    cluster_tuples = sorted(((obj, cluster_dict.get(attr_getter(obj)))
        for obj in objs), key=get_1)

    grouped = itertools.groupby(cluster_tuples, key=get_1)

    clusters = [ list(map(get_0, v))
        for k, v in grouped ]

    return clusters 
Example #12
Source File: timeline.py    From coursys with GNU General Public License v3.0
def add_transfer_happenings(self):
        """
        Categorize the careers by adm_appl_nbr and acad_prog.stdnt_car_nbr: being the same means same program
        application event. If those were split between departments, then it's an inter-departmental transfer.

        Those need transfer out/in happenings added.
        """
        adm_appl_groups = itertools.groupby(self.careers, lambda c: (c.adm_appl_nbr, c.app_stdnt_car_nbr))
        for (adm_appl_nbr, app_stdnt_car_nbr), careers in adm_appl_groups:
            careers = list(careers)
            if len(careers) == 1:
                continue

            # sort by order they happen: heuristically, effdt of first happening in career
            careers.sort(key=lambda c: c.happenings[0].effdt)

            # we have an inter-department transfer: create transfer in/out happenings
            for c_out, c_in in zip(careers, careers[1:]):
                effdt = c_in.happenings[0].effdt
                t_out = CareerUnitChangeOut(emplid=c_out.emplid, adm_appl_nbr=c_out.adm_appl_nbr, unit=c_out.unit,
                        otherunit=c_in.unit, effdt=effdt, admit_term=c_out.admit_term)
                t_in = CareerUnitChangeIn(emplid=c_in.emplid, adm_appl_nbr=c_in.adm_appl_nbr, unit=c_in.unit,
                        otherunit=c_out.unit, effdt=effdt, admit_term=c_in.admit_term)
                c_out.happenings.append(t_out)
                c_in.happenings.insert(0, t_in) 
Example #13
Source File: cs_gender_exploration.py    From coursys with GNU General Public License v3.0
def get_bad_gpa(self):
        current_semester = Semester.current()
        semesters = [current_semester.name, current_semester.offset_name(-1), current_semester.offset_name(-2)]
        cmpt_acad_progs, eng_acad_progs = get_fas_programs()

        cmpt_gpas = BadGPAsQuery(query_args={
            'acad_progs': cmpt_acad_progs,
            'strms': semesters,
            'gpa': '2.4',
        })
        low_gpas = cmpt_gpas.result()
        self.artifacts.append(low_gpas)

        rows = low_gpas.rows
        rows.sort()
        groups = itertools.groupby(rows, CSGenderExplorationReport.group_bin)
        out_rows = [[prog_gpa[0], prog_gpa[1], prog_gpa[2], len(list(students))] for prog_gpa, students in groups]
        bins = Table()
        bins.append_column('ACAD_PROG_PRIMARY')
        bins.append_column('GENDER')
        bins.append_column('GPA')
        bins.append_column('COUNT')
        bins.rows = out_rows
        self.artifacts.append(bins) 
Example #14
Source File: importer.py    From coursys with GNU General Public License v3.0
def import_joint(extra_where='1=1'):
    """
    Find combined sections and set CourseOffering.config['joint_with'] appropriately.
    """
    db = SIMSConn()
    db.execute("SELECT strm, class_nbr, sctn_combined_id FROM ps_sctn_cmbnd c WHERE c.strm IN %s "
               " AND ("+extra_where+")", (import_semesters(),))

    for k,v in itertools.groupby(db, lambda d: (d[0], d[2])):
        # for each combined offering...
        strm, _ = k
        class_nbrs = [int(class_nbr) for _,class_nbr,_ in v]
        offerings = CourseOffering.objects.filter(semester__name=strm, class_nbr__in=class_nbrs)
        for offering in offerings:
            offering.set_joint_with([o.slug for o in offerings if o != offering])
            offering.save() 
Example #15
Source File: models.py    From coursys with GNU General Public License v3.0
def deduplicate(cls, start_date=None, end_date=None, dry_run=False):
        """
        Remove any EnrolmentHistory objects that aren't adding any new information.
        """
        all_ehs = EnrolmentHistory.objects.order_by('offering', 'date')
        if start_date:
            all_ehs = all_ehs.filter(date__gte=start_date)
        if end_date:
            all_ehs = all_ehs.filter(date__lte=end_date)

        for off_id, ehs in itertools.groupby(all_ehs, key=lambda eh: eh.offering_id):
            # iterate through EnrolmentHistory for this offering and purge any "same as yesterday" entries
            with transaction.atomic():
                current = next(ehs)
                for eh in ehs:
                    if current.is_dup(eh):
                        if not dry_run:
                            eh.delete()
                        else:
                            print('delete', eh)
                    else:
                        current = eh 
Example #16
Source File: views.py    From coursys with GNU General Public License v3.0
def courses_json(request, semester):
    offerings = CourseOffering.objects.filter(semester__name=semester)\
        .exclude(component="CAN").exclude(flags=CourseOffering.flags.combined) \
        .select_related('semester').prefetch_related('meetingtime_set')
    instructors = Member.objects.filter(role='INST', offering__semester__name=semester).select_related('person')
    instr_by_offeringid = dict(
        (oid, list(instr))
        for oid, instr
        in itertools.groupby(instructors, lambda m: m.offering_id)
    )

    resp = HttpResponse(content_type="application/json")
    resp['Content-Disposition'] = 'inline; filename="' + semester + '.json"'
    crs_data = (o.export_dict(instructors=instr_by_offeringid.get(o.id, [])) for o in offerings)
    json.dump({'courses': list(crs_data)}, resp, indent=1)
    return resp 
Example #17
Source File: views.py    From coursys with GNU General Public License v3.0
def _setup_download(request: HttpRequest, course_slug: str, activity_slug: str):
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)
    versions = QuestionVersion.objects.filter(question__in=questions)
    version_number_lookup = {  # version_number_lookup[question_id][version_id] == version_number
        q_id: {v.id: i+1 for i,v in enumerate(vs)}
        for q_id, vs in itertools.groupby(versions, key=lambda v: v.question_id)
    }

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person', 'question_version', 'question') \
        .order_by('student__person')

    by_student = itertools.groupby(answers, key=lambda a: a.student)
    multiple_versions = len(questions) != len(versions)

    return activity, questions, version_number_lookup, by_student, multiple_versions 
Example #18
Source File: views.py    From coursys with GNU General Public License v3.0
def submissions(request: HttpRequest, course_slug: str, activity_slug: str) -> HttpResponse:
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person') \
        .order_by('student__person')

    students = set(a.student for a in answers)
    starts_ends = quiz.get_starts_ends(students)
    by_student = itertools.groupby(answers, key=lambda a: a.student)
    subs_late = [(member, max(a.modified_at for a in ans) - starts_ends[member][1]) for member, ans in by_student]

    context = {
        'offering': offering,
        'activity': activity,
        'quiz': quiz,
        'subs_late': subs_late,
        'timedelta_zero': datetime.timedelta(seconds=0)
    }
    return render(request, 'quizzes/submissions.html', context=context) 
Example #19
Source File: filters.py    From recruit with Apache License 2.0
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    If you, for example, have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by gender, you can do something like the following
    snippet:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Additionally it's possible to use tuple unpacking for the grouper and
    list:

    .. sourcecode:: html+jinja

        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>

    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.

    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    return [_GroupTuple(key, list(values)) for key, values
            in groupby(sorted(value, key=expr), expr)] 
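For reference, a short self-contained render of the first docstring snippet (the data here is made up for illustration):

from jinja2 import Template

persons = [
    {"gender": "female", "first_name": "Ada", "last_name": "Lovelace"},
    {"gender": "male", "first_name": "Alan", "last_name": "Turing"},
    {"gender": "female", "first_name": "Grace", "last_name": "Hopper"},
]
template = Template(
    "{% for group in persons|groupby('gender') %}"
    "{{ group.grouper }}: {% for person in group.list %}{{ person.first_name }} {% endfor %}\n"
    "{% endfor %}"
)
print(template.render(persons=persons))
# female: Ada Grace
# male: Alan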
Example #20
Source File: state.py    From discord.py with MIT License
def _delay_ready(self):
        launch = self._ready_state.launch
        while True:
            # this snippet of code is basically waiting 2 seconds per shard
            # (2.0 * len(self.shard_ids)) until the last GUILD_CREATE is sent
            try:
                await asyncio.wait_for(launch.wait(), timeout=2.0 * len(self.shard_ids))
            except asyncio.TimeoutError:
                break
            else:
                launch.clear()

        guilds = sorted(self._ready_state.guilds, key=lambda g: g[0].shard_id)

        for shard_id, sub_guilds_info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
            sub_guilds, sub_available = zip(*sub_guilds_info)
            if self._fetch_offline:
                await self.request_offline_members(sub_guilds, shard_id=shard_id)

            for guild, unavailable in zip(sub_guilds, sub_available):
                if unavailable is False:
                    self.dispatch('guild_available', guild)
                else:
                    self.dispatch('guild_join', guild)
            self.dispatch('shard_ready', shard_id)

        # remove the state
        try:
            del self._ready_state
        except AttributeError:
            pass # already been deleted somehow

        # regular users cannot shard so we won't worry about it here.

        # clear the current task
        self._ready_task = None

        # dispatch the event
        self.call_handlers('ready')
        self.dispatch('ready') 
Example #21
Source File: views.py    From coursys with GNU General Public License v3.0
def index(request):
    sub_units = Unit.sub_units(request.units)
    fac_roles = Role.objects.filter(role='FAC', unit__in=sub_units).select_related('person', 'unit').order_by('person')

    fac_roles_gone = [r for r in fac_roles if r.gone]
    fac_roles_gone = itertools.groupby(fac_roles_gone, key=lambda ro: ro.person)
    fac_roles_gone = [(p, [r.unit for r in roles], CareerEvent.current_ranks(p.id)) for p, roles in fac_roles_gone]

    fac_roles = [r for r in fac_roles if not r.gone]
    fac_roles = itertools.groupby(fac_roles, key=lambda ro: ro.person)
    fac_roles = [(p, [r.unit for r in roles], CareerEvent.current_ranks(p.id)) for p, roles in fac_roles]

    editor = get_object_or_404(Person, userid=request.user.username)
    events = CareerEvent.objects.filter(status='NA').only_subunits(request.units)
    events = [e.get_handler() for e in events]
    events = [h for h in events if h.can_approve(editor)]
    is_admin = Role.objects_fresh.filter(unit__in=request.units, person__userid=request.user.username,
                                         role__in=['ADMN', 'FACA']).exists()
    filterform = UnitFilterForm(sub_units)

    future_people = FuturePerson.objects.visible()

    context = {
        'fac_roles': fac_roles,
        'fac_roles_gone': fac_roles_gone,
        'queued_events': len(events),
        'filterform': filterform,
        'viewvisas': request.GET.get('viewvisas', False),
        'future_people': future_people,
        'is_admin': is_admin,
    }
    return render(request, 'faculty/index.html', context) 
Example #22
Source File: config.py    From recruit with Apache License 2.0
def pp_options_list(keys, width=80, _print=False):
    """ Builds a concise listing of available options, grouped by prefix """

    from textwrap import wrap
    from itertools import groupby

    def pp(name, ks):
        pfx = ('- ' + name + '.[' if name else '')
        ls = wrap(', '.join(ks), width, initial_indent=pfx,
                  subsequent_indent='  ', break_long_words=False)
        if ls and ls[-1] and name:
            ls[-1] = ls[-1] + ']'
        return ls

    ls = []
    singles = [x for x in sorted(keys) if x.find('.') < 0]
    if singles:
        ls += pp('', singles)
    keys = [x for x in keys if x.find('.') >= 0]

    for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
        ks = [x[len(k) + 1:] for x in list(g)]
        ls += pp(k, ks)
    s = '\n'.join(ls)
    if _print:
        print(s)
    else:
        return s

Example #23
Source File: help.py    From discord.py with MIT License
def send_bot_help(self, mapping):
        ctx = self.context
        bot = ctx.bot

        if bot.description:
            # <description> portion
            self.paginator.add_line(bot.description, empty=True)

        no_category = '\u200b{0.no_category}:'.format(self)
        def get_category(command, *, no_category=no_category):
            cog = command.cog
            return cog.qualified_name + ':' if cog is not None else no_category

        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        max_size = self.get_max_size(filtered)
        to_iterate = itertools.groupby(filtered, key=get_category)

        # Now we can add the commands to the page.
        for category, commands in to_iterate:
            commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
            self.add_indented_commands(commands, heading=category, max_size=max_size)

        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)

        await self.send_pages() 
Example #24
Source File: help.py    From discord.py with MIT License
def send_bot_help(self, mapping):
        ctx = self.context
        bot = ctx.bot

        if bot.description:
            self.paginator.add_line(bot.description, empty=True)

        note = self.get_opening_note()
        if note:
            self.paginator.add_line(note, empty=True)

        no_category = '\u200b{0.no_category}'.format(self)
        def get_category(command, *, no_category=no_category):
            cog = command.cog
            return cog.qualified_name if cog is not None else no_category

        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        to_iterate = itertools.groupby(filtered, key=get_category)

        for category, commands in to_iterate:
            commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)
            self.add_bot_commands_formatting(commands, category)

        note = self.get_ending_note()
        if note:
            self.paginator.add_line()
            self.paginator.add_line(note)

        await self.send_pages() 
Example #25
Source File: test_lbaas.py    From kuryr-kubernetes with Apache License 2.0
def _generate_endpoints(self, targets):
        def _target_to_port(item):
            _, (listen_port, target_port) = item
            return {'port': target_port, 'name': str(listen_port)}
        port_with_addrs = [
            (p, [e[0] for e in grp])
            for p, grp in itertools.groupby(
                sorted(targets.items()), _target_to_port)]
        return {
            'metadata': {
                'name': 'ep_name',
                'namespace': 'default'
            },
            'subsets': [
                {
                    'addresses': [
                        {
                            'ip': ip,
                            'targetRef': {
                                'kind': k_const.K8S_OBJ_POD,
                                'name': ip,
                                'namespace': 'default'
                            }
                        }
                        for ip in addrs
                    ],
                    'ports': [port]
                }
                for port, addrs in port_with_addrs
            ]
        } 
Example #26
Source File: help.py    From avrae with GNU General Public License v3.0
def send_bot_help(self, mapping):
        ctx = self.context
        bot = ctx.bot

        if bot.description:
            # <description> portion
            self.embed_paginator.add_description(bot.description)

        no_category = '\u200bUncategorized'

        def get_category(command):
            cog = command.cog
            return cog.qualified_name if cog is not None else no_category

        filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)
        to_iterate = itertools.groupby(filtered, key=get_category)

        # Now we can add the commands to the page.
        for category, commands in to_iterate:
            commands = sorted(commands, key=lambda c: c.name)
            self.add_commands(commands, heading=category)

        note = self.get_ending_note()
        if note:
            self.embed_paginator.add_field("More Help", note)

        await self.send() 
Example #27
Source File: nfv_parameters.py    From JetPack with Apache License 2.0
def range_extract(self, a):
        ranges = []
        for k, iterable in groupby(enumerate(sorted(a)), self.subtract):
            rng = list(iterable)
            if len(rng) == 1:
                s = str(rng[0][1])
            else:
                s = "%s-%s" % (rng[0][1], rng[-1][1])
            ranges.append(s)
        return ranges 
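The `self.subtract` key is not shown in this excerpt; presumably it returns the difference between the enumeration index and the value, so that a run of consecutive integers shares one key. A standalone sketch under that assumption:

from itertools import groupby

def range_extract(a):
    # index - value is constant across a run of consecutive integers
    ranges = []
    for _, iterable in groupby(enumerate(sorted(a)), lambda pair: pair[0] - pair[1]):
        rng = list(iterable)
        if len(rng) == 1:
            ranges.append(str(rng[0][1]))
        else:
            ranges.append("%s-%s" % (rng[0][1], rng[-1][1]))
    return ranges

print(range_extract([4, 2, 1, 3, 8, 9]))  # ['1-4', '8-9']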
Example #28
Source File: __init__.py    From tox with MIT License
def _expand_envstr(envstr):
    # split by commas not in groups
    tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
    envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]

    def expand(env):
        tokens = _ENVSTR_EXPAND_PATTERN.split(env)
        parts = [_WHITESPACE_PATTERN.sub("", token).split(",") for token in tokens]
        return ["".join(variant) for variant in itertools.product(*parts)]

    return mapcat(expand, envlist) 
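The regex patterns are not shown in this excerpt. The `groupby(tokens, key=bool)` step re-joins the truthy tokens that fall between the falsy separators produced by `re.split`; a minimal illustration, with a hand-made token list standing in for the regex output:

from itertools import groupby

# falsy tokens (None / "") act as separators; runs of truthy tokens are re-joined
tokens = ["py", "{37,38}", None, "lint", None, "", "docs"]
envlist = ["".join(g).strip() for k, g in groupby(tokens, key=bool) if k]
print(envlist)  # ['py{37,38}', 'lint', 'docs']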
Example #29
Source File: views.py    From MPContribs with MIT License
def split_comp(compstr):
    """
    Splits a string containing the composition of a perovskite solid solution into its components
    Chemical composition: (am_1, am_2)(tm_1, tm_2)Ox
    :param compstr: composition as a string
    :return:        am_1, am_2, tm_1, tm_2;
    each of these output variables contains the species and its stoichiometry,
    e.g. ("Fe", 0.6)
    """

    am_1, am_2, tm_1, tm_2 = None, None, None, None

    compstr_spl = ["".join(g) for _, g in groupby(str(compstr), str.isalpha)]

    for l in range(len(compstr_spl)):
        try:
            if (
                ptable.Element(compstr_spl[l]).is_alkaline
                or ptable.Element(compstr_spl[l]).is_alkali
                or ptable.Element(compstr_spl[l]).is_rare_earth_metal
            ):
                if am_1 is None:
                    am_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
                elif am_2 is None:
                    am_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
            if ptable.Element(compstr_spl[l]).is_transition_metal and not (
                ptable.Element(compstr_spl[l]).is_rare_earth_metal
            ):
                if tm_1 is None:
                    tm_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
                elif tm_2 is None:
                    tm_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
        # stoichiometries raise ValueErrors in pymatgen .is_alkaline etc., ignore these errors and skip that entry
        except ValueError:
            pass

    return am_1, am_2, tm_1, tm_2 
Example #30
Source File: make_submission.py    From kaggle-rcic-1st with MIT License
def assign_unique(self, pool=__builtins__):
        plates = (list(plate) for _, plate in groupby(sorted(self, key=itemgetter(0)),
            key=lambda x: Dataset.split(x[0])[:3]))
        return chain(*pool.map(self._assign_unique_in_plate, plates))