Python itertools.groupby() Examples

The following are 30 code examples showing how to use itertools.groupby(). They are extracted from open source projects; the links above each example lead to the original project and source file.

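A quick note before the examples: groupby() only merges consecutive elements whose keys compare equal, so input that is not already ordered must be sorted with the same key function first. A minimal sketch with made-up data:

from itertools import groupby

words = ["apple", "avocado", "banana", "blueberry", "cherry"]

# groupby() only groups *adjacent* equal keys, so sort by the same key first.
for letter, group in groupby(sorted(words), key=lambda w: w[0]):
    print(letter, list(group))
# a ['apple', 'avocado']
# b ['banana', 'blueberry']
# c ['cherry']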

Example 1
Project: pyspark-cassandra   Author: TargetHolding   File: rdd.py    License: Apache License 2.0 7 votes
def _spanning_iterator(self):
        ''' implements basic spanning on the python side operating on Rows '''
        # TODO implement in Java and support not only Rows

        columns = set(str(c) for c in self.columns)

        def spanning_iterator(partition):
            def key_by(columns):
                for row in partition:
                    k = Row(**{c: row.__getattr__(c) for c in columns})
                    for c in columns:
                        del row[c]

                    yield (k, row)

            for g, l in groupby(key_by(columns), itemgetter(0)):
                yield g, list(_[1] for _ in l)

        return spanning_iterator 
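The pattern above pairs every row with its grouping key, then spans runs of consecutive equal keys with groupby. A simplified standalone sketch of the same idea, using plain tuples instead of Spark Rows (the field names are invented for illustration):

from itertools import groupby
from operator import itemgetter

rows = [("alice", 1), ("alice", 2), ("bob", 3)]  # already ordered by key

# Span consecutive rows sharing the same first element.
for user, pairs in groupby(rows, key=itemgetter(0)):
    print(user, [value for _, value in pairs])
# alice [1, 2]
# bob [3]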
Example 2
Project: NeuroKit   Author: neuropsychology   File: events_find.py    License: MIT License 7 votes
def _events_find(event_channel, threshold="auto", threshold_keep="above"):
    binary = signal_binarize(event_channel, threshold=threshold)

    if threshold_keep.lower() != "above":
        binary = np.abs(binary - 1)  # Reverse if events are below

    # Initialize data
    events = {"onset": [], "duration": []}

    index = 0
    for event, group in itertools.groupby(binary):
        duration = len(list(group))
        if event == 1:
            events["onset"].append(index)
            events["duration"].append(duration)
        index += duration

    # Convert to array
    events["onset"] = np.array(events["onset"])
    events["duration"] = np.array(events["duration"])
    return events 
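The key-less groupby(binary) call is effectively run-length encoding: each group is one run of identical samples, and runs of 1s mark events. The same onset/duration logic as a self-contained sketch:

import itertools

binary = [0, 0, 1, 1, 1, 0, 1, 1, 0]

index, onsets, durations = 0, [], []
for value, run in itertools.groupby(binary):
    duration = len(list(run))   # length of this run of equal samples
    if value == 1:              # a run of 1s is one event
        onsets.append(index)
        durations.append(duration)
    index += duration
print(onsets, durations)  # [2, 6] [3, 2]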
Example 3
Project: MPContribs   Author: materialsproject   File: views.py    License: MIT License 6 votes
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = re.sub(r"([A-Z])", r" \1", compstr).split()
    sample = ["".join(g) for _, g in groupby(sample, str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]

    return samp_new 
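The groupby(..., str.isalpha) idiom splits a sequence into alternating alphabetic and non-alphabetic runs; applied character by character it tokenizes a formula string, for example:

from itertools import groupby

print(["".join(g) for _, g in groupby("Fe2O3", str.isalpha)])
# ['Fe', '2', 'O', '3']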
Example 4
Project: MPContribs   Author: materialsproject   File: pre_submission.py    License: MIT License 6 votes
def add_comp_one(compstr):
    """
    Adds stoichiometries of 1 to compstr that don't have them
    :param compstr:  composition as a string
    :return:         composition with stoichiometries of 1 added
    """
    sample = pd.np.array(re.sub(r"([A-Z])", r" \1", compstr).split()).astype(str)
    sample = ["".join(g) for _, g in groupby(sample, str.isalpha)]
    samp_new = ""
    for k in range(len(sample)):
        spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
        for l in range(len(spl_samp)):
            if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
                spl_samp[l] = spl_samp[l] + "1"
            samp_new += spl_samp[l]
    return samp_new 
Example 5
Project: pyscf   Author: pyscf   File: common_slow.py    License: Apache License 2.0 6 votes
def format_mask(x):
    """
    Formats a mask into a readable string.
    Args:
        x (ndarray): an array with the mask;

    Returns:
        A readable string with the mask.
    """
    x = numpy.asanyarray(x)
    if len(x) == 0:
        return "(empty)"
    if x.dtype == bool:
        x = numpy.argwhere(x)[:, 0]
    grps = tuple(list(g) for _, g in groupby(x, lambda n, c=count(): n-next(c)))
    return ",".join("{:d}-{:d}".format(i[0], i[-1]) if len(i) > 1 else "{:d}".format(i[0]) for i in grps) 
Example 6
Project: pydfs-lineup-optimizer   Author: DimaKudosh   File: rules.py    License: MIT License 6 votes
def apply(self, solver, players_dict):
        optimizer = self.optimizer
        extra_positions = optimizer.players_with_same_position
        positions_combinations = set([tuple(sorted(player.positions)) for player in players_dict.keys()
                                      if len(player.positions) > 1])
        for rank, rank_positions in groupby(optimizer.settings.positions, lambda pos: pos.for_rank):
            positions = get_positions_for_optimizer(list(rank_positions), positions_combinations)
            unique_positions = optimizer.available_positions
            players_by_positions = {
                position: {variable for player, variable in players_dict.items()
                           if player.rank == rank and position in player.positions} for position in unique_positions
            }
            for position, places in positions.items():
                extra = 0
                if len(position) == 1:
                    extra = extra_positions.get(position[0], 0)
                players_with_position = set()
                for pos in position:
                    players_with_position.update(players_by_positions[pos])
                solver.add_constraint(players_with_position, None, SolverSign.GTE, places + extra) 
Example 7
Project: pdfplumber   Author: jsvine   File: table.py    License: MIT License 6 votes
def merge_edges(edges, snap_tolerance, join_tolerance):
    """
    Using the `snap_edges` and `join_edge_group` methods above, merge a list of edges into a more "seamless" list.
    """
    def get_group(edge):
        if edge["orientation"] == "h":
            return ("h", edge["top"])
        else:
            return ("v", edge["x0"])

    if snap_tolerance > 0:
        edges = snap_edges(edges, snap_tolerance)

    if join_tolerance > 0:
        _sorted = sorted(edges, key=get_group)
        edge_groups = itertools.groupby(_sorted, key=get_group)
        edge_gen = (join_edge_group(items, k[0], join_tolerance)
            for k, items in edge_groups)
        edges = list(itertools.chain(*edge_gen))
    return edges 
Example 8
Project: pdfplumber   Author: jsvine   File: utils.py    License: MIT License 6 votes
def cluster_objects(objs, attr, tolerance):
    if isinstance(attr, (str, int)):
        attr_getter = itemgetter(attr)
    else:
        attr_getter = attr
    objs = to_list(objs)
    values = map(attr_getter, objs)
    cluster_dict = make_cluster_dict(values, tolerance)

    get_0, get_1 = itemgetter(0), itemgetter(1)

    cluster_tuples = sorted(((obj, cluster_dict.get(attr_getter(obj)))
        for obj in objs), key=get_1)

    grouped = itertools.groupby(cluster_tuples, key=get_1)

    clusters = [ list(map(get_0, v))
        for k, v in grouped ]

    return clusters 
Example 9
Project: small_norb   Author: ndrplz   File: dataset.py    License: MIT License 6 votes
def group_dataset_by_category_and_instance(self, dataset_split):
        """
        Group small NORB dataset for (category, instance) key
        
        Parameters
        ----------
        dataset_split: str
            Dataset split, can be either 'train' or 'test'

        Returns
        -------
        groups: list
            List of 25 groups of 972 elements each. All examples of each group are
            from the same category and instance
        """
        if dataset_split not in ['train', 'test']:
            raise ValueError('Dataset split "{}" not allowed.'.format(dataset_split))

        groups = []
        for key, group in groupby(iterable=sorted(self.data[dataset_split]),
                                  key=lambda x: (x.category, x.instance)):
            groups.append(list(group))

        return groups 
Example 10
Project: coursys   Author: sfu-fas   File: views.py    License: GNU General Public License v3.0 6 votes
def submissions(request: HttpRequest, course_slug: str, activity_slug: str) -> HttpResponse:
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person') \
        .order_by('student__person')

    students = set(a.student for a in answers)
    starts_ends = quiz.get_starts_ends(students)
    by_student = itertools.groupby(answers, key=lambda a: a.student)
    subs_late = [(member, max(a.modified_at for a in ans) - starts_ends[member][1]) for member, ans in by_student]

    context = {
        'offering': offering,
        'activity': activity,
        'quiz': quiz,
        'subs_late': subs_late,
        'timedelta_zero': datetime.timedelta(seconds=0)
    }
    return render(request, 'quizzes/submissions.html', context=context) 
Example 11
Project: coursys   Author: sfu-fas   File: views.py    License: GNU General Public License v3.0 6 votes
def _setup_download(request: HttpRequest, course_slug: str, activity_slug: str):
    offering = get_object_or_404(CourseOffering, slug=course_slug)
    activity = get_object_or_404(Activity, slug=activity_slug, offering=offering, group=False)
    quiz = get_object_or_404(Quiz, activity=activity)
    questions = Question.objects.filter(quiz=quiz)
    versions = QuestionVersion.objects.filter(question__in=questions)
    version_number_lookup = {  # version_number_lookup[question_id][version_id] == version_number
        q_id: {v.id: i+1 for i,v in enumerate(vs)}
        for q_id, vs in itertools.groupby(versions, key=lambda v: v.question_id)
    }

    answers = QuestionAnswer.objects.filter(question__in=questions) \
        .select_related('student__person', 'question_version', 'question') \
        .order_by('student__person')

    by_student = itertools.groupby(answers, key=lambda a: a.student)
    multiple_versions = len(questions) != len(versions)

    return activity, questions, version_number_lookup, by_student, multiple_versions 
Example 12
Project: coursys   Author: sfu-fas   File: views.py    License: GNU General Public License v3.0 6 votes
def courses_json(request, semester):
    offerings = CourseOffering.objects.filter(semester__name=semester)\
        .exclude(component="CAN").exclude(flags=CourseOffering.flags.combined) \
        .select_related('semester').prefetch_related('meetingtime_set')
    instructors = Member.objects.filter(role='INST', offering__semester__name=semester).select_related('person')
    instr_by_offeringid = dict(
        (oid, list(instr))
        for oid, instr
        in itertools.groupby(instructors, lambda m: m.offering_id)
    )

    resp = HttpResponse(content_type="application/json")
    resp['Content-Disposition'] = 'inline; filename="' + semester + '.json"'
    crs_data = (o.export_dict(instructors=instr_by_offeringid.get(o.id, [])) for o in offerings)
    json.dump({'courses': list(crs_data)}, resp, indent=1)
    return resp 
Example 13
Project: coursys   Author: sfu-fas   File: models.py    License: GNU General Public License v3.0 6 votes
def deduplicate(cls, start_date=None, end_date=None, dry_run=False):
        """
        Remove any EnrolmentHistory objects that aren't adding any new information.
        """
        all_ehs = EnrolmentHistory.objects.order_by('offering', 'date')
        if start_date:
            all_ehs = all_ehs.filter(date__gte=start_date)
        if end_date:
            all_ehs = all_ehs.filter(date__lte=end_date)

        for off_id, ehs in itertools.groupby(all_ehs, key=lambda eh: eh.offering_id):
            # iterate through EnrolmentHistory for this offering and purge any "same as yesterday" entries
            with transaction.atomic():
                current = next(ehs)
                for eh in ehs:
                    if current.is_dup(eh):
                        if not dry_run:
                            eh.delete()
                        else:
                            print('delete', eh)
                    else:
                        current = eh 
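Note how the loop seeds current with next(ehs) and then compares the rest of the group against it, keeping the first of each run of duplicates. The same keep-first pattern as a standalone sketch over made-up records:

import itertools

records = [("a", 1), ("a", 1), ("a", 2), ("b", 3), ("b", 3)]

kept = []
for _, group in itertools.groupby(records, key=lambda r: r[0]):
    current = next(group)   # the first record in each group always survives
    kept.append(current)
    for rec in group:       # drop records identical to the current survivor
        if rec != current:
            kept.append(rec)
            current = rec
print(kept)  # [('a', 1), ('a', 2), ('b', 3)]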
Example 14
Project: coursys   Author: sfu-fas   File: importer.py    License: GNU General Public License v3.0 6 votes
def import_joint(extra_where='1=1'):
    """
    Find combined sections and set CourseOffering.config['joint_with'] appropriately.
    """
    db = SIMSConn()
    db.execute("SELECT strm, class_nbr, sctn_combined_id FROM ps_sctn_cmbnd c WHERE c.strm IN %s "
               " AND ("+extra_where+")", (import_semesters(),))

    for k,v in itertools.groupby(db, lambda d: (d[0], d[2])):
        # for each combined offering...
        strm, _ = k
        class_nbrs = [int(class_nbr) for _,class_nbr,_ in v]
        offerings = CourseOffering.objects.filter(semester__name=strm, class_nbr__in=class_nbrs)
        for offering in offerings:
            offering.set_joint_with([o.slug for o in offerings if o != offering])
            offering.save() 
Example 15
Project: coursys   Author: sfu-fas   File: cs_gender_exploration.py    License: GNU General Public License v3.0 6 votes
def get_bad_gpa(self):
        current_semester = Semester.current()
        semesters = [current_semester.name, current_semester.offset_name(-1), current_semester.offset_name(-2)]
        cmpt_acad_progs, eng_acad_progs = get_fas_programs()

        cmpt_gpas = BadGPAsQuery(query_args={
            'acad_progs': cmpt_acad_progs,
            'strms': semesters,
            'gpa': '2.4',
        })
        low_gpas = cmpt_gpas.result()
        self.artifacts.append(low_gpas)

        rows = low_gpas.rows
        rows.sort()
        groups = itertools.groupby(rows, CSGenderExplorationReport.group_bin)
        out_rows = [[prog_gpa[0], prog_gpa[1], prog_gpa[2], len(list(students))] for prog_gpa, students in groups]
        bins = Table()
        bins.append_column('ACAD_PROG_PRIMARY')
        bins.append_column('GENDER')
        bins.append_column('GPA')
        bins.append_column('COUNT')
        bins.rows = out_rows
        self.artifacts.append(bins) 
Example 16
Project: coursys   Author: sfu-fas   File: timeline.py    License: GNU General Public License v3.0 6 votes
def add_transfer_happenings(self):
        """
        Categorize the careers by adm_appl_nbr and acad_prog.stdnt_car_nbr: being the same means same program
        application event. If those were split between departments, then it's an inter-departmental transfer.

        Those need transfer out/in happenings added.
        """
        adm_appl_groups = itertools.groupby(self.careers, lambda c: (c.adm_appl_nbr, c.app_stdnt_car_nbr))
        for (adm_appl_nbr, app_stdnt_car_nbr), careers in adm_appl_groups:
            careers = list(careers)
            if len(careers) == 1:
                continue

            # sort by order they happen: heuristically, effdt of first happening in career
            careers.sort(key=lambda c: c.happenings[0].effdt)

            # we have an inter-department transfer: create transfer in/out happenings
            for c_out, c_in in zip(careers, careers[1:]):
                effdt = c_in.happenings[0].effdt
                t_out = CareerUnitChangeOut(emplid=c_out.emplid, adm_appl_nbr=c_out.adm_appl_nbr, unit=c_out.unit,
                        otherunit=c_in.unit, effdt=effdt, admit_term=c_out.admit_term)
                t_in = CareerUnitChangeIn(emplid=c_in.emplid, adm_appl_nbr=c_in.adm_appl_nbr, unit=c_in.unit,
                        otherunit=c_out.unit, effdt=effdt, admit_term=c_in.admit_term)
                c_out.happenings.append(t_out)
                c_in.happenings.insert(0, t_in) 
Example 17
Project: ivona-speak   Author: Pythonity   File: command_line.py    License: MIT License 6 votes
def list_voices(access_key, secret_key, voice_language, voice_gender):
    """List available Ivona voices"""
    try:
        ivona_api = IvonaAPI(access_key, secret_key)
    except (ValueError, IvonaAPIException) as e:
        raise click.ClickException("Something went wrong: {}".format(repr(e)))

    click.echo("Listing available voices...")

    voices_list = ivona_api.get_available_voices(
        language=voice_language,
        gender=voice_gender,
    )

    # Group voices by language
    voices_dict = dict()
    data = sorted(voices_list, key=lambda x: x['Language'])
    for k, g in groupby(data, key=lambda x: x['Language']):
        voices_dict[k] = list(g)

    for ln, voices in voices_dict.items():
        voice_names = [v['Name'] for v in voices]
        click.echo("{}: {}".format(ln, ', '.join(voice_names)))

    click.secho("All done", fg='green') 
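The sort-then-groupby-into-dict sequence above is a common way to bucket records by a field. Condensed, with made-up voice data:

from itertools import groupby

voices = [{"Name": "Ivy", "Language": "en-US"},
          {"Name": "Ruben", "Language": "nl-NL"},
          {"Name": "Joey", "Language": "en-US"}]

data = sorted(voices, key=lambda v: v["Language"])
by_language = {lang: [v["Name"] for v in g]
               for lang, g in groupby(data, key=lambda v: v["Language"])}
print(by_language)  # {'en-US': ['Ivy', 'Joey'], 'nl-NL': ['Ruben']}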
Example 18
Project: avrae   Author: avrae   File: gen_command_json.py    License: GNU General Public License v3.0 6 votes
def main(out='commands.json'):
    from dbot import bot
    modules = []

    # helpers
    no_category = '\u200bUncategorized'

    def get_category(command):
        cog = command.cog
        return cog.qualified_name if cog is not None else no_category

    # build an iterator of (category, commands)
    iterator = filter(lambda c: not c.hidden, bot.commands)
    filtered = sorted(iterator, key=get_category)
    to_iterate = itertools.groupby(filtered, key=get_category)

    # add modules to output
    for module, commands in to_iterate:
        modules.append(parse_module(module, commands))

    with open(out, 'w') as f:
        json.dump({
            "modules": modules
        }, f) 
Example 19
Project: svviz   Author: svviz   File: track.py    License: MIT License 5 votes
def _highlightOverlaps(self, positionCounts, yoffset, height, regionID, readID, isFlanking):
        overlapSegments = [list(i[1]) for i in itertools.groupby(sorted(positionCounts), lambda x: positionCounts[x]) if i[0] > 1]

        for segment in overlapSegments:
            start = min(segment)
            end = max(segment)

            curstart = self.scale.topixels(start, regionID)
            curend = self.scale.topixels(end, regionID)

            curColor = self.overlapColor
            # if isFlanking:
            #     curColor = "#88FF88"
            self.svg.rect(curstart, yoffset, curend-curstart, height, fill=curColor, 
                **{"class":"read", "data-readid":readID}) 
Example 20
Project: MPContribs   Author: materialsproject   File: views.py    License: MIT License 5 votes
def split_comp(compstr):
    """
    Splits a string containing the composition of a perovskite solid solution into its components
    Chemical composition: (am_1, am_2)(tm_1, tm_2)Ox
    :param compstr: composition as a string
    :return:        am_1, am_2, tm_1, tm_2;
    each of these output variables contains the species and its stoichiometry,
    e.g. ("Fe", 0.6)
    """

    am_1, am_2, tm_1, tm_2 = None, None, None, None

    compstr_spl = ["".join(g) for _, g in groupby(str(compstr), str.isalpha)]

    for l in range(len(compstr_spl)):
        try:
            if (
                ptable.Element(compstr_spl[l]).is_alkaline
                or ptable.Element(compstr_spl[l]).is_alkali
                or ptable.Element(compstr_spl[l]).is_rare_earth_metal
            ):
                if am_1 is None:
                    am_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
                elif am_2 is None:
                    am_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
            if ptable.Element(compstr_spl[l]).is_transition_metal and not (
                ptable.Element(compstr_spl[l]).is_rare_earth_metal
            ):
                if tm_1 is None:
                    tm_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
                elif tm_2 is None:
                    tm_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
        # stoichiometries raise ValueErrors in pymatgen .is_alkaline etc., ignore these errors and skip that entry
        except ValueError:
            pass

    return am_1, am_2, tm_1, tm_2 
Example 21
Project: tmhmm.py   Author: dansondergaard   File: cli.py    License: MIT License 5 votes
def summarize(path):
    """
    Summarize a path as a list of (start, end, state) triples.
    """
    for state, group in itertools.groupby(enumerate(path), key=lambda x: x[1]):
        group = list(group)
        start = min(group, key=lambda x: x[0])[0]
        end = max(group, key=lambda x: x[0])[0]
        yield start, end, state 
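Grouping enumerate(path) by the path symbol keeps the original indices available inside each group, so the smallest and largest index of a run become its start and end. Usage, assuming summarize() above is in scope:

list(summarize("ooMMMMiii"))
# [(0, 1, 'o'), (2, 5, 'M'), (6, 8, 'i')]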
Example 22
Project: simplesam   Author: mdshw5   File: simplesam.py    License: MIT License 5 votes
def cigar_split(self):
        # https://github.com/brentp/bwa-meth
        if self.cigar == "*":
            yield (0, None)
            return  # under PEP 479 (Python 3.7+), raising StopIteration in a generator is a RuntimeError
        cig_iter = groupby(self.cigar, lambda c: c.isdigit())
        for _, n in cig_iter:
            op = int("".join(n)), "".join(next(cig_iter)[1])
            if op[1] in self._valid_cigar:
                yield op
            else:
                raise ValueError("CIGAR operation %s in record %s is invalid." % (op[1], self.qname)) 
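The subtlety here is consuming the groupby iterator two groups at a time: the for loop takes each run of digits, and next(cig_iter) immediately takes the run of operator letters that follows it. A standalone sketch of the same parsing:

from itertools import groupby

def split_cigar(cigar):
    cig_iter = groupby(cigar, lambda c: c.isdigit())
    # Each iteration consumes a digit group from the loop, then pulls the
    # following letter group off the same iterator with next().
    for _, digits in cig_iter:
        yield int("".join(digits)), "".join(next(cig_iter)[1])

print(list(split_cigar("3M1I44M")))  # [(3, 'M'), (1, 'I'), (44, 'M')]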
Example 23
Project: matchpy   Author: HPAC   File: many_to_one.py    License: MIT License 5 votes
def as_graph(self):
        if Digraph is None:
            raise ImportError('The graphviz package is required to draw the graph.')
        graph = Digraph()
        for i in range(len(self.states)):
            graph.node(str(i), str(i))

        for state, edges in enumerate(self.states):
            for target, labels in itertools.groupby(sorted(edges.items()), key=itemgetter(1)):
                label = '\n'.join(bin(l)[2:].zfill(self.k) for l, _ in labels)
                graph.edge(str(state), str(target), label)

        return graph 
Example 24
Project: sixcells   Author: oprypin   File: common.py    License: GNU General Public License v3.0 5 votes
def together(self):
        if self.show_info:
            groups = itertools.groupby(self.members, key=lambda it: it.kind is Cell.full)
            return sum(1 for full, _ in groups if full) <= 1 
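Counting groups whose key is truthy is a compact way to count runs; here it checks that the full cells form at most one contiguous block. The same test as a minimal standalone sketch:

from itertools import groupby

def one_block(flags):
    # True when all truthy values are contiguous (zero or one run).
    return sum(1 for full, _ in groupby(flags) if full) <= 1

print(one_block([0, 1, 1, 0]))  # True  (one run of 1s)
print(one_block([1, 0, 1]))     # False (two runs of 1s)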
Example 25
Project: gnocchi   Author: gnocchixyz   File: amqp1d.py    License: Apache License 2.0 5 votes
def on_message(self, event):
        json_message = ujson.loads(event.message.body)
        timestamp = utils.dt_in_unix_ns(utils.utcnow())
        measures_by_host_and_name = sorted((
            (message["host"],
             self._serialize_identifier(index, message),
             value)
            for message in json_message
            for index, value in enumerate(message["values"])
        ))
        for (host, name), values in itertools.groupby(
                measures_by_host_and_name, key=lambda x: x[0:2]):
            measures = (incoming.Measure(timestamp, v[2]) for v in values)
            self.processor.add_measures(host, name, measures) 
Example 26
Project: gnocchi   Author: gnocchixyz   File: test_utils.py    License: Apache License 2.0 5 votes
def get_measures_list(measures_agg):
    return {
        aggmethod: list(itertools.chain(
            *[[(timestamp, measures_agg[agg].aggregation.granularity, value)
               for timestamp, value in measures_agg[agg]]
              for agg in sorted(aggs,
                                key=storage.ATTRGETTER_GRANULARITY,
                                reverse=True)]))
        for aggmethod, aggs in itertools.groupby(measures_agg.keys(),
                                                 storage.ATTRGETTER_METHOD)
    } 
Example 27
Project: gnocchi   Author: gnocchixyz   File: __init__.py    License: Apache License 2.0 5 votes
def group_metrics_by_sack(self, metrics):
        """Iterate on a list of metrics, grouping them by sack.

        :param metrics: A list of metric uuids.
        :return: An iterator yielding (sack, metrics) pairs.
        """
        metrics_and_sacks = sorted(
            ((m, self.sack_for_metric(m)) for m in metrics),
            key=ITEMGETTER_1)
        for sack, metrics in itertools.groupby(metrics_and_sacks,
                                               key=ITEMGETTER_1):
            yield sack, [m[0] for m in metrics] 
Example 28
Project: gnocchi   Author: gnocchixyz   File: file.py    License: Apache License 2.0 5 votes
def _list_split_keys_unbatched(self, metric, aggregations, version=3):
        keys = collections.defaultdict(set)
        for method, grouped_aggregations in itertools.groupby(
                sorted(aggregations, key=ATTRGETTER_METHOD),
                ATTRGETTER_METHOD):
            try:
                files = os.listdir(
                    self._build_metric_path(metric, method))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    raise storage.MetricDoesNotExist(metric)
                raise
            raw_keys = list(map(
                lambda k: k.split("_"),
                filter(
                    lambda f: self._version_check(f, version),
                    files)))
            if not raw_keys:
                continue
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[0])
            k_granularities = list(map(utils.to_timespan, zipped[1]))
            grouped_aggregations = list(grouped_aggregations)
            for timestamp, granularity in six.moves.zip(
                    k_timestamps, k_granularities):
                for agg in grouped_aggregations:
                    if granularity == agg.granularity:
                        keys[agg].add(carbonara.SplitKey(
                            timestamp,
                            sampling=granularity))
                        break
        return keys 
Example 29
Project: razzy-spinner   Author: rafasashi   File: agreement.py    License: GNU General Public License v3.0 5 votes
def _grouped_data(self, field, data=None):
        data = data or self.data
        return groupby(sorted(data, key=itemgetter(field)), itemgetter(field)) 
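This one-liner is the canonical sort-then-group recipe for records keyed by one field. With made-up (coder, item, label) tuples and field 0:

from itertools import groupby
from operator import itemgetter

data = [("c1", "i1", 0), ("c2", "i1", 1), ("c1", "i2", 1)]
for coder, rows in groupby(sorted(data, key=itemgetter(0)), itemgetter(0)):
    print(coder, list(rows))
# c1 [('c1', 'i1', 0), ('c1', 'i2', 1)]
# c2 [('c2', 'i1', 1)]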
Example 30
Project: NeuroKit   Author: neuropsychology   File: signal_phase.py    License: MIT License 5 votes
def _signal_phase_binary(signal):

    phase = itertools.chain.from_iterable(np.linspace(0, 1, sum([1 for i in v])) for _, v in itertools.groupby(signal))
    phase = np.array(list(phase))

    # Convert to radians
    phase = np.deg2rad(phase * 360)
    return phase
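Each run of constant samples becomes a linear 0-to-1 ramp of the same length, which np.deg2rad then maps onto 0..2π radians. A small check of the grouping step:

import itertools
import numpy as np

signal = [0, 0, 0, 1, 1]
phase = itertools.chain.from_iterable(
    np.linspace(0, 1, len(list(v))) for _, v in itertools.groupby(signal))
print(np.array(list(phase)))  # [0.  0.5 1.  0.  1. ]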