Python time.time() Examples

The following are code examples showing how to use time.time(). They are drawn from open source Python projects.

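Before the project examples, a quick orientation: time.time() returns the current Unix time as a float of seconds since the epoch, so the common timing pattern in the examples below is simply to subtract two calls. Here is a minimal, self-contained sketch of that pattern (the summation is only a stand-in workload); for careful benchmarking, time.perf_counter() is usually preferable, since time.time() can jump when the system clock is adjusted.

import time

start = time.time()  # current Unix time as float seconds since the epoch
total = sum(i * i for i in range(10**6))  # stand-in workload to be timed
elapsed = time.time() - start
print("workload returned {} after {:.3f} seconds".format(total, elapsed))
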
Example 1
Project: SyNEThesia   Author: RunOrVeith   File: session_management.py    MIT License
def utilize_session(self, model_name, data_provider, **kwargs):
        with SessionHandler(model=self.model, model_name=model_name) as session_handler:
            session_handler.load_weights_or_init()
            start_time = time.time()
            step = session_handler.step
            available = locals()
            available.pop("self", None)
            available.update(kwargs)
            print(f"{'Resuming' if step > 0 else 'Starting'} {model_name}: at step {step}")

            for input_feature in data_provider:
                available["input_feature"] = input_feature
                for hook in self.hooks:
                    provided = hook(self=self, **available)
                    available.update(provided)

                    yield hook.get_yieldables(self=self, **available) 
Example 2
Project: s2g   Author: caesar0301   File: test.py    MIT License
def test_point_projects_to_edge(self):
        # p = (114.83299055, 26.8892277)
        p = (121.428387, 31.027371)
        a = time.time()
        edges, segments = self.sg.point_projects_to_edges(p, 0.01)
        print(time.time() - a)

        if self.show_plots:
            plt.figure()
            s2g.plot_lines(MultiLineString(segments), color='orange')  # original roads
            for i in range(0, len(edges)):
                s, e = edges[i]
                sxy = self.sg.node_xy[s]
                exy = self.sg.node_xy[e]
                plt.plot([sxy[0], exy[0]], [sxy[1], exy[1]], color='green')  # graph edges
            plt.plot(p[0], p[1], color='red', markersize=12, marker='o')  # bridges
            plt.show() 
Example 3
Project: fs_image   Author: facebookincubator   File: repo_server.py    MIT License
def read_snapshot_dir(path: Path):
    db_path = path / 'snapshot.sql3'
    assert os.path.exists(db_path), f'no {db_path}, use rpm_repo_snapshot()'
    location_to_obj = add_snapshot_db_objs(sqlite3.connect(db_path))
    for repo in os.listdir(path / 'repos'):
        # Make JSON metadata for the repo's GPG keys.
        key_dir = path / 'repos' / repo / 'gpg_keys'
        for key_filename in os.listdir(key_dir.decode()):
            with open(key_dir / key_filename, 'rb') as infile:
                key_content = infile.read()
            location_to_obj[os.path.join(repo.decode(), key_filename)] = {
                'size': len(key_content),
                # We don't have a good timestamp for these, so set it to
                # "now".  Caching efficiency losses should be negligible :)
                'build_timestamp': int(time.time()),
                'content_bytes': key_content,  # Instead of `storage_id`
            }
    return location_to_obj 
Example 4
Project: Home_Surveillance_with_Python   Author: kalfasyan   File: base_camera.py    MIT License
def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        remove = None
        for ident, event in self.events.items():
            if not event[0].isSet():
                # if this client's event is not set, then set it
                # also update the last set timestamp to now
                event[0].set()
                event[1] = now
            else:
                # if the client's event is already set, it means the client
                # did not process a previous frame
                # if the event stays set for more than 5 seconds, then assume
                # the client is gone and remove it
                if now - event[1] > 5:
                    remove = ident
        if remove:
            del self.events[remove] 
Example 5
Project: Home_Surveillance_with_Python   Author: kalfasyan   File: base_camera.py    MIT License
def _thread(cls):
        """Camera background thread."""
        print('Starting camera thread.')
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)

            # if no client has asked for a frame within the inactivity
            # timeout then stop the thread
            if time.time() - BaseCamera.last_access > 10*60*500:
                frames_iterator.close()
                print('Stopping camera thread due to inactivity.')
                break
        BaseCamera.thread = None 
Example 6
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def generate_window_counts(self):
        self._generate_windows()
        print("** Window read counting started for {} libraries...".format(len(
            self._lib_dict)), flush=True)
        t_start = time()
        for lib_name, lib in self._lib_dict.items():
            print(lib_name, flush=True)
            for replicon in self._replicon_dict:
                lib.replicon_dict[replicon][
                    "window_list"] = self._replicon_dict[replicon][
                        "window_list"]
            lib.count_reads_for_windows()
        t_end = time()
        print("Window read counting finished in {} seconds.\n".format(
            t_end-t_start), flush=True)
        print("** Generating data frames and filtering windows...", flush=True)
        t_start = time()
        self._convert_to_data_frame()
        t_end = time()
        print("Data frame generation and filtering finished in {} seconds.\n"
              .format(t_end-t_start), flush=True) 
Example 7
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def _plot_and_write_windows_gfold(self):
        # plot windows
        print("* Plotting normalized windows...", flush=True)
        t_start = time()
        sig_window_df = self._window_df[self._window_df.significant]
        unsig_window_df = self._initial_window_df[
            ~self._initial_window_df.index.isin(sig_window_df.index)]
        self._plot_initial_windows(unsig_window_df.base_means,
                                   unsig_window_df.fold_change,
                                   sig_window_df.base_means,
                                   sig_window_df.fold_change)
        t_end = time()
        print("Plotting took {} seconds.".format(t_end-t_start), flush=True)
        # write windows after prefiltering with test results
        self._window_df.to_csv(
            "{}/windows_after_prefiltering.csv".format(self._output_folder),
            sep='\t', index=False, encoding='utf-8') 
Example 8
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License
def _filter_peaks_without_control(self, df):
        # calculate mad for original data frame
        median_abs_dev_from_zero = mad(df.loc[:, self._exp_lib_list].mean(
            axis=1), center=0.0)
        # minimum expression cutoff based on mean over experiment libraries
        print("Removing peaks based on mad cutoff from DataFrame "
              "with {} rows...".format(len(df)), flush=True)
        t_start = time()
        min_expr = (self._mad_multiplier * median_abs_dev_from_zero)
        print("Minimal peak expression based on mean over RIP/CLIP "
              "libraries: {} (MAD from zero: {})".format(
                  min_expr, median_abs_dev_from_zero), flush=True)
        df = df.loc[df.loc[:, self._exp_lib_list].mean(axis=1) >= min_expr, :]
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(df)), flush=True)
        return df 
Example 9
Project: weibo-login   Author: littlepinecone   File: login.py    GNU General Public License v3.0
def path_generate(a):
    pos = {'1': [32, 32],
           '2': [128, 32],
           '3': [32, 128],
           '4': [128, 128]}
    path = []
    t0 = (int(round(time.time() * 1000)))
    t00 = 0
    for j in range(0, 3):
        for i in range(0, 7):
            x = pos[a[j]][0] + i * (pos[a[j + 1]][0] - pos[a[j]][0]) / 6 + int(random.uniform(1, 3))

            y = pos[a[j]][1] + i * (pos[a[j + 1]][1] - pos[a[j]][1]) / 6 + int(random.uniform(2, 3))

            t = 30 * int(random.uniform(1, 2))
            t00 += t

            path0 = [x, y, t00]
            path.append(path0)
    path[0][2] = t0
    # print path
    return path 
Example 10
Project: incubator-spot   Author: apache   File: flow_oa.py    Apache License 2.0
def start(self):       
        
        ####################
        start = time.time()
        ####################         

        self._create_folder_structure()
        self._clear_previous_executions()        
        self._add_ipynb()  
        self._get_flow_results()
        self._add_network_context()
        self._add_geo_localization()
        self._add_reputation()        
        self._create_flow_scores()
        self._get_oa_details()
        self._ingest_summary()

        ##################
        end = time.time()
        print(end - start)
        ################## 
Example 11
Project: incubator-spot   Author: apache   File: proxy_oa.py    Apache License 2.0
def start(self):

        ####################
        start = time.time()
        ####################

        self._create_folder_structure()
        self._clear_previous_executions()   
        self._add_ipynb()
        self._get_proxy_results()
        self._add_reputation() 
        self._add_iana()
        self._add_network_context() 
        self._create_proxy_scores_csv()
        self._get_oa_details()
        self._ingest_summary()


        ##################
        end = time.time()
        print(end - start)
        ################## 
Example 12
Project: incubator-spot   Author: apache   File: dns_oa.py    Apache License 2.0
def start(self):

        ####################
        start = time.time()
        ####################

        self._clear_previous_executions()
        self._create_folder_structure()
        self._add_ipynb()
        self._get_dns_results()
        self._add_tld_column()
        self._add_reputation()
        self._add_hh_column()
        self._add_iana()
        self._add_network_context()
        self._create_dns_scores()
        self._get_oa_details()
        self._ingest_summary()

        ##################
        end = time.time()
        print(end - start)
        ################## 
Example 13
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def test_concurrent():
    # run this in multiple processes,
    # the idea is that collisions will crash, but not munge data
    import random
    import time
    if not os.path.exists("tmp_test_con.log"):
        init_data_file("tmp_test_con.log")

    k = LadderData("tmp_test_con.log")
    # test concurrent log writes
    thread = str(random.randint(0, 100))
    print(thread)
    for x in range(100):
        time.sleep(0.01)
        k.add_player(thread + '_' + str(x))

    p, g = k.get_players_games()
    print(len(p))
Example 14
Project: webnull   Author: macrael   File: webnull.py    BSD 3-Clause "New" or "Revised" License
def arg_parser():
    parser = argparse.ArgumentParser(description='A tool for putting websites into a black hole.')
    commands = parser.add_subparsers(title='commands', metavar='COMMAND')
    commands.required = True
    commands.dest = "commands"

    deny = commands.add_parser('deny', description='Add a site to the black hole. It will become unreachable.', help='Add a site to the black hole. It will become unreachable.')
    deny.add_argument('sitename', help='The website to be blackholed. A URL will be stripped down correctly.')
    deny.set_defaults(func=deny_site)

    allow = commands.add_parser('allow', description='Allow access to a blackholed site for a spell.', help='Allow access to a blackholed site for a spell.')
    time_or_tomorrow = allow.add_mutually_exclusive_group()
    time_or_tomorrow.add_argument('-t', '--time', help='sets the duration to enable a site for. Default is five minutes.', default=5, type=int)
    time_or_tomorrow.add_argument('-m', '--morning', help='allow all sites until tomorrow morning at 6am', action='store_true')
    all_or_one = allow.add_mutually_exclusive_group(required=True)
    all_or_one.add_argument('-a', '--all', action='store_true', help='All blackholed hostnames will be granted access instead of a matching sitename.')
    all_or_one.add_argument('sitename', help='All blackholed hostnames that contain this string will be temporarily granted access.', nargs='?')
    allow.set_defaults(func=allow_site)

    return parser 
Example 15
Project: unicorn-hat-hd   Author: pimoroni   File: demo.py    MIT License
def current_milli_time():
    return int(round(time.time() * 1000)) 
Example 16
Project: UrsaRobotics_SmartHome   Author: stav98   File: speech_recogn.py    GNU General Public License v3.0
def wait_for_message(d):
    client.loop_start()
    time.sleep(d)
    client.loop_stop()

# Forever
Example 17
Project: s2g   Author: caesar0301   File: test.py    MIT License
def test_subgraph_within_box(self):
        bounding_box = box(121.428387, 31.027371, 121.430863, 31.030227)
        a = time.time()
        subgraph = self.sg.subgraph_within_box(bounding_box)
        print(time.time() - a)
        if self.show_plots:
            plt.figure()
            nx.draw(subgraph, pos=self.sg.node_xy, node_size=50)
            plt.show() 
Example 18
Project: fs_image   Author: facebookincubator   File: demo_sendstreams.py    MIT License
def _populate_sendstream_dict(d):
    d['build_start_time'] = _float_to_sec_nsec_tuple(time.time())
    yield d
    d['dump'] = subprocess.run(
        ['btrfs', 'receive', '--dump'],
        input=d['sendstream'], stdout=subprocess.PIPE, check=True,
        # split into lines to make the `pretty` output prettier
    ).stdout.rstrip(b'\n').split(b'\n')
    d['build_end_time'] = _float_to_sec_nsec_tuple(time.time())


# Takes `path_in_repo` because this is part of the library interface, and
# thus must work in @mode/opt, and thus we cannot use `__file__` here. 
Example 19
Project: fs_image   Author: facebookincubator   File: repo_objects.py    MIT License
def new(cls, *, xml: bytes):  # NamedTuple.__new__ cannot be overridden
        repodatas = frozenset(_parse_repomd(xml))
        return cls.__new__(
            cls,
            xml=xml,
            fetch_timestamp=int(time.time()),
            build_timestamp=max(r.build_timestamp for r in repodatas),
            repodatas=repodatas,
            checksum=Checksum(
                algorithm=CANONICAL_HASH,
                hexdigest=hashlib.new(CANONICAL_HASH, xml).hexdigest(),
            ),
            size=len(xml),
        ) 
Example 20
Project: fs_image   Author: facebookincubator   File: test_common.py    MIT License
def test_retry_fn(self):

        class Retriable:
            def __init__(self, attempts_to_fail=0):
                self.attempts = 0
                self.first_success_attempt = attempts_to_fail + 1

            def run(self):
                self.attempts += 1
                if self.attempts >= self.first_success_attempt:
                    return self.attempts
                raise RuntimeError(self.attempts)

        self.assertEqual(1, retry_fn(
            Retriable().run, delays=[], what='succeeds immediately'
        ))

        # Check log messages, and ensure that delays add up as expected
        start_time = time.time()
        with self.assertLogs(common_log) as log_ctx:
            self.assertEqual(4, retry_fn(
                Retriable(3).run, delays=[0, 0.1, 0.2], what='succeeds on try 4'
            ))
        self.assertTrue(any(
            '\n[Retry 3 of 3] succeeds on try 4 -- waiting 0.2 seconds.\n' in o
                for o in log_ctx.output
        ))
        self.assertGreater(time.time() - start_time, 0.3)

        # Check running out of retries
        with self.assertLogs(common_log) as log_ctx, \
                self.assertRaises(RuntimeError) as ex_ctx:
            retry_fn(Retriable(100).run, delays=[0] * 7, what='never succeeds')
        self.assertTrue(any(
            '\n[Retry 7 of 7] never succeeds -- waiting 0 seconds.\n' in o
                for o in log_ctx.output
        ))
        self.assertEqual((8,), ex_ctx.exception.args) 
Example 21
Project: SyNEThesia   Author: RunOrVeith   File: session_types.py    MIT License
def time_diff(start_time):
    m, s = divmod(time.time() - start_time, 60)
    h, m = divmod(m, 60)
    d, h = divmod(h, 24)
    return "%d:%02d:%02d:%02d" % (d, h, m, s) 
Example 22
Project: SyNEThesia   Author: RunOrVeith   File: session_types.py    MIT License
def _maybe_save(self, session_handler, step, start_time, save_every_n_steps):
        if step % save_every_n_steps == 0 and step > 0:
            session_handler.save(step=step)
            print(f"Step {step}, time: {time_diff(start_time)}: Saving in {session_handler.checkpoint_dir}") 
Example 23
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License
def load_all(self):
        """The function to load all data and labels

        Returns:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array, with each element a string
        """
        start = time.time()
        print("Start Loading Data from BCF {}".format(
            'MEMORY' if self._bcf_mode == 'MEM' else 'FILE'))

        self._labels = np.loadtxt(self._label_fn).astype(str)

        if self._bcf.size() != self._labels.shape[0]:
            raise Exception("Number of samples in data "
                            "and labels are not equal")
        else:
            for idx in range(self._bcf.size()):
                datum_str = self._bcf.get(idx)
                self._data.append(datum_str)
        end = time.time()
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
Example 24
Project: Home_Surveillance_with_Python   Author: kalfasyan   File: base_camera.py    MIT License
def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # this is a new client
            # add an entry for it in the self.events dict
            # each entry has two elements, a threading.Event() and a timestamp
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait() 
Example 25
Project: Home_Surveillance_with_Python   Author: kalfasyan   File: base_camera.py    MIT License
def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()

            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()

            # wait until frames are available
            while self.get_frame() is None:
                time.sleep(0) 
Example 26
Project: Home_Surveillance_with_Python   Author: kalfasyan   File: base_camera.py    MIT License
def get_frame(self):
        """Return the current camera frame."""
        BaseCamera.last_access = time.time()

        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()

        return BaseCamera.frame 
Example 27
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def _convert_to_data_frame(self):
        self._window_df = pd.DataFrame()
        for replicon in sorted(self._replicon_dict):
            for strand in ["+", "-"]:
                # add window positions to data frame
                row_number = len(self._replicon_dict[replicon]["window_list"])
                df = pd.concat([
                    pd.Series([replicon] * row_number),
                    pd.Series([strand] * row_number),
                    pd.Series([window[0]+1 for window in
                               self._replicon_dict[
                                   replicon]["window_list"]]),
                    pd.Series([window[1] for window in
                               self._replicon_dict[
                        replicon]["window_list"]])], axis=1)
                df.columns = ["replicon", "strand", "w_start", "w_end"]
                # add library counts to data frame
                for lib_name, lib in self._lib_dict.items():
                    df[lib_name] = (pd.Series(lib.replicon_dict[
                        replicon]["window_counts"].loc[:, strand]))
                self._window_df = self._window_df.append(df,
                                                         ignore_index=True)
            del self._replicon_dict[replicon]["window_list"]
        # remove windows without expression in any library
        print("Removing empty windows from DataFrame with {} rows...".format(
            len(self._window_df.index)), flush=True)
        t_start = time()
        self._window_df = self._window_df.loc[
            (self._window_df.loc[:, self._lib_names_list].sum(axis=1) > 0), :]
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(self._window_df.index)), flush=True)
        if self._window_df.empty:
            print("**Dataframe empty**", flush=True)
            return
        if self._stat_test == "gtest":
            self._run_gtest_preprocessing()
        elif self._stat_test == "deseq":
            self._run_deseq_preprocessing() 
Example 28
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def _run_gtest_preprocessing(self):
        # define size factors
        self._define_size_factors()
        # add pseudocounts
        self._window_df[self._lib_names_list] += 1.0
        # normalize counts
        self._window_df[self._lib_names_list] = self._window_df[
            self._lib_names_list].div(
                self._size_factors, axis='columns')
        t_end = time()
        # calculate base means for all windows
        print("Calculating base means and fold changes...", flush=True)
        t_start = time()
        self._window_df["base_means"] = self._window_df.loc[
            :, self._lib_names_list].mean(axis=1)
        # calculate fcs for all windows
        self._window_df["fold_change"] = (
            self._window_df.loc[:, self._exp_lib_list].sum(axis=1) /
            self._window_df.loc[:, self._ctr_lib_list].sum(axis=1))
        t_end = time()
        print("Calculation took {} seconds.".format(t_end-t_start), flush=True)
        # write raw windows to file
        print("Writing normalized windows to file...", flush=True)
        t_start = time()
        self._window_df.to_csv("{}/raw_windows.csv".format(
            self._output_folder), sep='\t', index=False, encoding='utf-8')
        t_end = time()
        print("Writing took {} seconds.".format(t_end-t_start), flush=True)
        # filter windows
        print("* Filtering windows...", flush=True)
        self._initial_window_df = self._window_df.copy()
        self._window_df = self._prefilter_windows_gtest(self._window_df) 
Example 29
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def _prefilter_windows_deseq(self, df):
        print("Removing windows where not all experiment libs show "
              "expression from DataFrame with {} rows...".format(len(df)),
              flush=True)
        t_start = time()
        for exp_lib in self._exp_lib_list:
            exp_lib_zero_count = 0.0
            df = df.loc[(df.loc[:, exp_lib] > exp_lib_zero_count), :]
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(df)), flush=True)
        if df.empty:
            return df
        initial_window_df = df.copy()
        # normalize counts on initial windows
        initial_window_df[self._lib_names_list] = initial_window_df[
            self._lib_names_list].div(self._size_factors, axis='columns')
        # minimum expression cutoff based on mean over experiment libraries
        print("Removing windows based on mad cutoff from DataFrame "
              "with {} rows...".format(len(df)), flush=True)
        t_start = time()
        median_abs_dev_from_zero = mad(initial_window_df.loc[
            :, self._exp_lib_list].mean(axis=1), center=0.0)
        min_expr = (self._mad_multiplier * median_abs_dev_from_zero)
        print("Minimal window expression based on mean over RIP/CLIP "
              "libraries: {} (MAD from zero: {})".format(
                  min_expr, median_abs_dev_from_zero), flush=True)
        df = df.loc[initial_window_df.loc[:, self._exp_lib_list].mean(
            axis=1) >= min_expr, :]
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(df)), flush=True)
        return df 
Example 30
Project: PEAKachu   Author: tbischler   File: window.py    ISC License
def _generate_peak_counts(self):
        print("* Peak read counting started for {} libraries...".format(len(
            self._lib_dict)), flush=True)
        t_start = time()
        for lib_name, lib in self._lib_dict.items():
            print(lib_name, flush=True)
            for replicon in self._replicon_dict:
                lib.replicon_dict[replicon]["peak_df"] = self._replicon_dict[
                    replicon]["peak_df"]
            lib.count_reads_for_peaks()
        t_end = time()
        print("Peak read counting finished in {} seconds.".format(
            t_end-t_start), flush=True)
Example 31
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License
def _generate_peak_counts(self):
        print("** Peak read counting started for {} libraries...".format(
            len(self._lib_dict)), flush=True)
        t_start = time()
        for lib_name, lib in self._lib_dict.items():
            print(lib_name, flush=True)
            for replicon in self._replicon_dict:
                lib.replicon_dict[replicon]["peak_df"] = self._replicon_dict[
                    replicon]["peak_df"]
            lib.count_reads_for_peaks()
        t_end = time()
        print("Peak read counting finished in {} seconds.".format(
            t_end-t_start), flush=True) 
Example 32
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License
def _filter_peaks_without_replicates(self, df):
        # calculate mad for original data frame
        median_abs_dev_from_zero = mad(df.loc[:, self._exp_lib_list].mean(
            axis=1), center=0.0)
        # minimum expression cutoff based on mean over experiment libraries
        print("Removing peaks based on mad cutoff from DataFrame "
              "with {} rows...".format(len(df)), flush=True)
        t_start = time()
        min_expr = (self._mad_multiplier * median_abs_dev_from_zero)
        print("Minimal peak expression based on mean over RIP/CLIP "
              "libraries: {} (MAD from zero: {})".format(
                  min_expr, median_abs_dev_from_zero), flush=True)
        df = df.loc[df.loc[:, self._exp_lib_list].mean(axis=1) >= min_expr, :]
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(df)), flush=True)
        if df.empty:
            return df
        # minimum fold change
        print("Removing windows based on minimum fold change from DataFrame "
              "with {} rows...".format(len(df)), flush=True)
        t_start = time()
        df = df.query('fold_change >= @self._fc_cutoff')
        t_end = time()
        print("Removal took {} seconds. DataFrame contains now {} rows.".
              format((t_end-t_start), len(df)), flush=True)
        return df 
Example 33
Project: PEAKachu   Author: tbischler   File: coverage.py    ISC License
def generate_normalized_wiggle_files(project_folder, max_proc):
    parameter_dict = _read_parameters(project_folder)
    # create normalized coverage folder if it does not exist
    wiggle_folder = "{}/normalized_coverage".format(project_folder)
    if not exists(wiggle_folder):
        makedirs(wiggle_folder)
    # Generate coverage files in parallel
    print("** Generating normalized coverage files for {} libraries...".format(
          len(parameter_dict["libraries"])), flush=True)
    t_start = time()
    with futures.ProcessPoolExecutor(
            max_workers=max_proc) as executor:
        future_to_lib_name = {
            executor.submit(
                _generate_normalized_wiggle_file_for_lib, lib_name,
                lib["bam_file"], parameter_dict["paired_end"],
                parameter_dict["max_insert_size"], lib["size_factor"],
                wiggle_folder): lib_name for lib_name, lib
            in parameter_dict["libraries"].items()}
    for future in futures.as_completed(future_to_lib_name):
        lib_name = future_to_lib_name[future]
        print("* Coverage files for library {} generated.".format(lib_name),
              flush=True)
    t_end = time()
    print("Coverage file generation finished in {} seconds.".format(
        t_end-t_start), flush=True) 
Example 34
Project: PEAKachu   Author: tbischler   File: controller.py    ISC License
def _init_replicons(self):
        print("** Initializing replicons and reading annotations from .gff "
              "files if present...", flush=True)
        t_start = time()
        replicons = Replicons(self._args.ctr_libs, self._args.exp_libs,
                              self._args.gff_folder, self._args.features,
                              self._args.sub_features)
        replicons.init_replicons()
        t_end = time()
        print("Finished replicon initialization in {} seconds.\n".format(
            t_end-t_start), flush=True)
        return replicons 
Example 35
Project: PEAKachu   Author: tbischler   File: controller.py    ISC License
def _calc_sig_peaks_adaptive(self, adaptive, size_factors):
        # Run DESeq2 only if >1 libraries are available for both, experiment
        # and control libraries
        if len(self._args.exp_libs) > 1 and len(self._args.ctr_libs) > 1:
            print("** Calculating peak significance with DESeq2...",
                  flush=True)
            t_start = time()
            adaptive.run_deseq2_analysis(size_factors,
                                         self._args.pairwise_replicates)
            t_end = time()
            print("DESeq2 finished in {} seconds.\n".format(t_end-t_start),
                  flush=True)
        # If at least one control is available use fold change and MAD to
        # define significant peaks
        elif self._args.ctr_libs:
            print("** Calculating peak significance for insufficient "
                  "replicates based on fold change and MAD cutoff...",
                  flush=True)
            t_start = time()
            adaptive.run_analysis_without_replicates(size_factors)
            t_end = time()
            print("Peak calculation finished in {} seconds.\n".format(
                t_end-t_start), flush=True)
        # If no controls are available return initial peaks based on MAD cutoff
        else:
            print("** Calculating peaks without control based on MAD "
                  "cutoff...", flush=True)
            t_start = time()
            adaptive.run_analysis_without_control(size_factors)
            t_end = time()
            print("Peak calculation finished in {} seconds.\n".format(
                t_end-t_start), flush=True) 
Example 36
Project: 21tb_robot   Author: iloghyr   File: study_robot.py    MIT License
def log(info):
    """simple log"""
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), info)
    sys.stdout.flush() 
Example 37
Project: 21tb_robot   Author: iloghyr   File: study_robot.py    MIT License
def study(self, course_id):
        """study one course"""
        time_step = 180
        log('start course:%s' % course_id)
        self.http.post(self.apis['enter_course'] % course_id, json_ret=False)
        course_show_api = self.apis['course_show'] % course_id
        log('url:%s' % course_show_api)
        self.http.post(course_show_api, json_ret=False)
        items_list = self.get_course_items(course_id)
        log('*' * 50)
        log('There are %s sub-courses in total' % len(items_list))
        for index, i in enumerate(items_list):
            log('%s. %s' % (index + 1, i['name']))
        log('*' * 50)
        log('begin to start...')
        for index, i in enumerate(items_list):
            sco_id = i['scoId']
            log('begin to study:%s-%s %s' % (index + 1, i['name'], sco_id))
            location = self.select_score_item(course_id, sco_id)
            cnt = 0
            while True:
                location = location + time_step * cnt
                cnt += 1
                log('location: %s' % location)
                self.do_heartbeat()
                self.update_timestep()
                ret = self.do_save(course_id, sco_id, location)
                if ret:
                    log('%s-%s %s' % (course_id, sco_id, 'done, start next'))
                    break
                log('*********** study %ss then go on *************' % time_step)
                time.sleep(time_step)
        info = '\033[92m\tDONE COURSE, url:%s\033[0m' % course_show_api
        log(info) 
Example 38
Project: 21tb_robot   Author: iloghyr   File: study_robot.py    MIT License
def run(self):
        """Entry point"""
        s = time.time()
        course_list = []
        with open('study.list') as f:
            for course in f:
                course_list.append(course.strip())
        self.do_login()
        for course_id in course_list:
            try:
                self.study(course_id)
            except Exception as e:
                log("exception occurred, study next..")
        cost = int(time.time() - s)
        log('main end, cost: %ss' % cost) 
Example 39
Project: incubator-spot   Author: apache   File: dns_oa.py    Apache License 2.0
def _ingest_summary(self):
        # get date parameters.
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]

        self._logger.info("Getting ingest summary data for the day")
        
        ingest_summary_cols = ["date","total"]		
        result_rows = []        
        df_filtered =  pd.DataFrame()

        query_to_load = ("""
            SELECT frame_time, COUNT(*) as total FROM {0}.{1}
            WHERE y={2} AND m={3} AND d={4} AND unix_tstamp IS NOT NULL
            AND frame_time IS NOT NULL AND frame_len IS NOT NULL
            AND dns_qry_name IS NOT NULL AND ip_src IS NOT NULL
            AND (dns_qry_class IS NOT NULL AND dns_qry_type IS NOT NULL
            AND dns_qry_rcode IS NOT NULL ) GROUP BY frame_time;
        """).format(self._db,self._table_name, yr, mn, dy)

        results = impala.execute_query_as_list(query_to_load)
        df = pd.DataFrame(results)

        # Forms a new dataframe splitting the minutes from the time column
        df_new = pd.DataFrame([["{0}-{1}-{2} {3}:{4}".format(yr, mn, dy,\
            val['frame_time'].replace("  "," ").split(" ")[3].split(":")[0].zfill(2),\
            val['frame_time'].replace("  "," ").split(" ")[3].split(":")[1].zfill(2)),\
            int(val['total']) if not math.isnan(val['total']) else 0 ] for key,val in df.iterrows()],columns = ingest_summary_cols)

        #Groups the data by minute
        sf = df_new.groupby(by=['date'])['total'].sum()
        df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})

        df_final = df_filtered.append(df_per_min, ignore_index=True).to_records(False,False)

        if len(df_final) > 0:
            query_to_insert=("""
                INSERT INTO {0}.dns_ingest_summary PARTITION (y={1}, m={2}, d={3}) VALUES {4};
            """).format(self._db, yr, mn, dy, tuple(df_final))
            impala.execute_query(query_to_insert) 
Example 40
Project: jumpserver-python-sdk   Author: jumpserver   File: utils.py    GNU General Public License v2.0
def to_unixtime(time_string, format_string):
    with _STRPTIME_LOCK:
        return int(calendar.timegm(time.strptime(str(time_string), format_string))) 
Example 41
Project: jumpserver-python-sdk   Author: jumpserver   File: utils.py    GNU General Public License v2.0
def make_signature(access_key_secret, date=None):
    if isinstance(date, bytes):
        date = bytes.decode(date)
    if isinstance(date, int):
        date_gmt = http_date(date)
    elif date is None:
        date_gmt = http_date(int(time.time()))
    else:
        date_gmt = date

    data = str(access_key_secret) + "\n" + date_gmt
    return content_md5(data) 
Example 42
Project: telegram-innovation-chatbot   Author: zaoldyeck   File: olami.py    MIT License
def _gen_parameters(self, api, text, cusid):
        timestamp_ms = (int(time.time() * 1000))
        params = {'appkey': self.app_key,
                  'api': api,
                  'timestamp': timestamp_ms,
                  'sign': self._gen_sign(api, timestamp_ms),
                  'rq': self._gen_rq(text)}
        if cusid is not None:
            params.update(cusid=cusid)
        return params 
Example 43
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def add_player(self, name):
        with open(self.log_file, 'r+') as log:
            with LockFile(log):
                self._load_from_json(log)
                event = AddPlayerEvent(name, time.time())
                ret = event.process(self.players, self.games)
                self.events.append(event)
                log.seek(0)
                self._save_to_log(log)

        return ret 
Example 44
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def add_game(self, command_words):
        with open(self.log_file, 'r+') as log:
            with LockFile(log):
                self._load_from_json(log)
                event = AddGameEvent(command_words, time.time())
                ret = event.process(self.players, self.games)
                self.events.append(event)
                log.seek(0)
                self._save_to_log(log)

        return ret 
Example 45
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def to_json(self):
        ret = {}
        ret['type'] = 'AddPlayerEvent'
        ret['player'] = self.name
        ret['time'] = self.create_time
        return ret 
Example 46
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def from_json(the_json):
        assert the_json['type'] == 'AddPlayerEvent'
        return AddPlayerEvent(the_json['player'], the_json['time']) 
Example 47
Project: kicker-module   Author: EvanTheB   File: backend.py    GNU General Public License v3.0
def to_json(self):
        ret = {}
        ret['type'] = 'AddGameEvent'
        ret['command'] = " ".join(self.command_words)
        ret['time'] = self.create_time
        return ret 
Example 48
Project: kicker-module   Author: EvanTheB   File: front.py    GNU General Public License v3.0
def __init__(self):
        self.command = ["close_game",
                        "disrupt",
                        "class_warfare",
                        "sigma",
                        "time",
                        "variety",
                        "default",
                        "slow",
                        ] 
Example 49
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License
def __init__(self, io, max=0):  # type: (IO, int) -> None
        """
        Constructor.
        """

        self._io = io
        self._max = 0
        self._step_width = None
        self._set_max_steps(max)
        self._step = 0
        self._percent = 0.0
        self._format = None
        self._internal_format = None
        self._format_line_count = 0
        self._last_messages_length = 0
        self._should_overwrite = True

        if not self._io.error_output.supports_ansi():
            # Disable overwrite when output does not support ANSI codes.
            self._should_overwrite = False

            # Set a reasonable redraw frequency so output isn't flooded
            self.set_redraw_frequency(max / 10)

        self._messages = {}

        self._start_time = time.time() 
Example 50
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License
def start(self, max=None):
        """
        Start the progress output.
        """
        self._start_time = time.time()
        self._step = 0
        self._percent = 0.0

        if max is not None:
            self._set_max_steps(max)

        self.display() 
Example 51
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License
def _formatter_elapsed(self):
        return format_time(time.time() - self._start_time) 
Example 52
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License
def _formatter_remaining(self):
        if not self._max:
            raise RuntimeError(
                "Unable to display the remaining time "
                "if the maximum number of steps is not set."
            )

        if not self._step:
            remaining = 0
        else:
            remaining = round(
                (time.time() - self._start_time) / self._step * (self._max - self._step)
            )

        return format_time(remaining) 
Example 53
Project: clikit   Author: sdispater   File: progress_bar.py    MIT License
def _formatter_estimated(self):
        if not self._max:
            raise RuntimeError(
                "Unable to display the estimated time "
                "if the maximum number of steps is not set."
            )

        if not self._step:
            estimated = 0
        else:
            estimated = round((time.time() - self._start_time) / self._step * self._max)

        return format_time(estimated)
Example 54
Project: clikit   Author: sdispater   File: progress_indicator.py    MIT License
def _spin(self):
        while not self._auto_running.is_set():
            self.advance()

            time.sleep(0.1) 
Example 55
Project: clikit   Author: sdispater   File: progress_indicator.py    MIT License
def _get_current_time_in_milliseconds(self):
        return round(time.time() * 1000) 
Example 56
Project: clikit   Author: sdispater   File: progress_indicator.py    MIT License 5 votes vote down vote up
def _formatter_elapsed(self):
        return format_time(time.time() - self._start_time) 
Example 57
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0
def calc_max_lux(self, lux):
      now = time.time()
      self.med.append((now, lux))
      i = 0
      while self.med[i][0] < now - self.MEAN_TIME_S:
          i += 1
      self.med = self.med[i:]
      all_lux = [x[1] for x in self.med]
      self.max_lux = max(all_lux)
      self.med_lux = sum(all_lux) / float(len(all_lux)) 
Example 58
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0
def next_step(self):
      try:
          cp = self.PROFILES[self.profile]
      except:
          self.new_profile = True
          return None
      if self.profile != self.prev_profile:
          self.prev_profile = self.profile
          self.new_profile = True
      now = time.time()
      ch = self.new_profile
      while self.pstep >= len(cp['v']):
          self.pstep -= 1
      if now > self.plast + 1.0:
          if ((self.ch0 < cp['min'] or self.ch1 < cp['min'])
              and self.pstep < (len(cp['v']) - 1)):
              self.pstep += 1
              ch = True
          if ((self.ch0 > cp['max'] or self.ch1 > cp['max'])
              and self.pstep > 0):
              self.pstep -= 1
              ch = True
          self.plast = now
      if ch:
          self.new_profile = False
          return {'higain': cp['v'][self.pstep][0],
                  'mode': cp['v'][self.pstep][1]}
      return None 
Example 59
Project: webnull   Author: macrael   File: webnull.py    BSD 3-Clause "New" or "Revised" License
def pretty_time(time, now=datetime.datetime.now()):
    tomorrow = now + datetime.timedelta(days=1)
    next_day = now + datetime.timedelta(days=2)
    next_week = now + datetime.timedelta(weeks=1)
    pretty_fmt = '%-I:%M %p'
    pretty_prefix = ''
    if tomorrow < time < next_day:
        pretty_prefix = 'tomorrow at '
    elif time > next_day and time < next_week:
        pretty_prefix = '%A at '
    elif time > next_week and time.month == now.month and time.year == now.year:
        pretty_prefix = '%A the %-d' + pretty_suffix(time.day) + ' at '
    elif time > next_week and time.year == now.year:
        pretty_prefix = '%B %-d' + pretty_suffix(time.day) + ' at '
    elif time > next_week:
        pretty_prefix = '%B %-d' + pretty_suffix(time.day) + ' %Y at '

    return time.strftime(pretty_prefix + pretty_fmt) 
Example 60
Project: webnull   Author: macrael   File: webnull.py    BSD 3-Clause "New" or "Revised" License
def reblock_timer(duration, cleanup_func):
    if 'TEST_DURATION' in os.environ:
        duration = float(os.environ['TEST_DURATION'])

    def sigint_handler(signal, frame):
        cleanup_func()
        sys.exit(0)
    signals = [signal.SIGINT, signal.SIGHUP]
    for sig in signals:
        signal.signal(sig, sigint_handler)

    end_time = datetime.datetime.now() + datetime.timedelta(minutes=duration)
    ptime = pretty_time(end_time)
    print('allowed until ' + ptime)

    now = time.time()
    end_time = now + (duration * 60)
    while True:
        remaining = end_time - time.time()
        if remaining <= 0:
            break
        if remaining > 1000:
            time.sleep(10)
        else:
            time.sleep(1)

    cleanup_func() 
Example 61
Project: webnull   Author: macrael   File: webnull.py    BSD 3-Clause "New" or "Revised" License
def allow_site(args):
    cleanup_func = None
    if args.morning:
        args.time = minutes_to_morning()
    if args.all:
        unblock_all()
        cleanup_func = reblock_all
    else:
        unblock_site(args.sitename)
        cleanup_func = lambda: nullify_site(args.sitename)

    reblock_timer(args.time, cleanup_func) 
Example 62
Project: f5go   Author: f5devcentral   File: go_test.py    MIT License
def test_prettytime_should_return_today(self):
        """
        Verify case where the prettytime function should return the string 'today'
        :return:
        """
        timestamp = time.time()
        self.assertEqual('today', go.prettytime(timestamp)) 
Example 63
Project: f5go   Author: f5devcentral   File: go_test.py    MIT License
def test_prettytime_should_return_yesterday(self):
        """
        Verify case where the prettytime function should return the string 'yesterday'
        :return:
        """
        timestamp = time.time() - (24 * 3600)
        self.assertEqual('yesterday', go.prettytime(timestamp)) 
Example 64
Project: f5go   Author: f5devcentral   File: go_test.py    MIT License
def test_prettytime_should_return_num_of_days(self):
        """
        Verify case where the prettytime function should return the string number of days
        :return:
        """
        timestamp = time.time() - (6 * 24 * 3600)
        self.assertEqual('6 days ago', go.prettytime(timestamp)) 
Example 65
Project: f5go   Author: f5devcentral   File: go_test.py    MIT License
def test_prettytime_should_return_num_of_months(self):
        """
        Verify case where the prettytime function should return the string number of months
        :return:
        """
        timestamp = time.time() - (95 * 24 * 3600)
        self.assertEqual('3 months ago', go.prettytime(timestamp)) 
Example 66
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License
def load_all(self):
        """The function to load all data and labels

        Returns:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: numpy array of strings, to support multiple labels
        """
        start = time.time()
        print("Start Loading Data from CSV File {}".format(
            self._source_fn))
        # split csv fields on whitespace or commas
        sep = r'[\s,]+'
        try:
            df = pd.read_csv(self._source_fn, sep=sep, engine='python',
                             header=self._header)
            print("In total {} rows loaded to parse...".format(
                len(df.index)
            ))
            # parse df to get image file name and label
            for ln in df.iterrows():
                # for each row, the first column is file name, then labels
                fn_ = ln[1][0]
                if self._root:
                    fn_ = os.path.join(self._root, fn_)
                if not os.path.exists(fn_):
                    print("File {} does not exist, skip".format(fn_))
                    continue
                # read labels: the first column is image file name
                # and others are labels (one or more)
                label_ = ln[1][1:].values
                if len(label_) == 1:
                    label_ = label_[0]
                else:
                    label_ = ":".join([str(x) for x in label_.astype(int)])
                self._labels.append(str(label_))
                # open file as binary and read in
                with open(fn_, 'rb') as image_fp:
                    datum_str_ = image_fp.read()
                    self._data.append(datum_str_)
        except:
            print(sys.exc_info()[1], fn_)
            raise Exception("Error in Parsing input file")
        end = time.time()
        self._labels = np.array(self._labels)
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
Example 67
Project: Caffe-Python-Data-Layer   Author: liuxianming   File: DataManager.py    BSD 2-Clause "Simplified" License
def load_all(self):
        """The function to load all data and labels

        Returns:
        data: the list of raw data, needs to be decompressed
              (e.g., raw JPEG string)
        labels: 0-based labels, as a numpy array
        """
        start = time.time()
        print("Start Loading Data from LMDB {}".format(
            self._source_fn))
        try:
            db_ = lmdb.open(self._source_fn)
            data_cursor_ = db_.begin().cursor()
            if self._label_fn:
                label_db_ = lmdb.open(self._label_fn)
                label_cursor_ = label_db_.begin().cursor()
            # begin reading data
            if self._label_fn:
                label_cursor_.first()
            while data_cursor_.next():
                value_str = data_cursor_.value()
                datum_ = caffe_pb2.Datum()
                datum_.ParseFromString(value_str)
                self._data.append(datum_.data)
                if self._label_fn:
                    label_cursor_.next()
                    label_datum_ = caffe_pb2.Datum()
                    label_datum_.ParseFromString(label_cursor_.value())
                    label_ = caffe.io.datum_to_array(label_datum_)
                    label_ = ":".join([str(x) for x in label_.astype(int)])
                else:
                    label_ = str(datum_.label)
                self._labels.append(label_)
            # close all db
            db_.close()
            if self._label_fn:
                label_db_.close()
        except:
            raise Exception("Error in Parsing input file")
        end = time.time()
        self._labels = np.array(self._labels)
        print("Loading {} samples Done: Time cost {} seconds".format(
            len(self._data), end - start))

        return self._data, self._labels 
Example 68
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License
def generate_combined_bed_file(self):
        # execute read conversion in parallel
        print("** Converting reads to bed format for {} libraries...".format(
            len(self._exp_lib_list)), flush=True)
        exp_lib_dict = {lib_name: self._lib_dict[lib_name] for lib_name in
                        self._exp_lib_list}
        t_start = time()
        with futures.ProcessPoolExecutor(
                max_workers=self._max_proc) as executor:
            future_to_lib_name = {
                executor.submit(lib.merge_reads):
                lib.lib_name for lib in exp_lib_dict.values()}
        for future in futures.as_completed(future_to_lib_name):
            lib_name = future_to_lib_name[future]
            try:
                self._lib_dict[lib_name].replicon_dict = future.result()
            except Exception as exc:
                print("{} generated an exception: {}".format(lib_name, exc),
                      flush=True)
        for replicon in sorted(self._replicon_dict):
            self._replicon_dict[replicon]["reads"] = pd.Series()
            for lib_name, lib in exp_lib_dict.items():
                self._replicon_dict[replicon]["reads"] = self._replicon_dict[
                    replicon]["reads"].add(lib.replicon_dict[replicon][
                        "reads"], fill_value=0)
            self._replicon_dict[replicon]["reads"] = self._replicon_dict[
                replicon]["reads"].reset_index(name="count")
            split_index = pd.DataFrame(list(self._replicon_dict[replicon][
                "reads"]["index"].str.split(',')), columns=[
                    "start", "end", "strand"])
            split_index.loc[:, ["start", "end"]] = split_index.loc[
                :, ["start", "end"]].apply(pd.to_numeric)
            del self._replicon_dict[replicon]["reads"]["index"]
            self._replicon_dict[replicon]["reads"] = split_index.join(
                self._replicon_dict[replicon]["reads"]).sort_values(
                    ["strand", "start", "end"], ascending=[False, True, True])
            self._replicon_dict[replicon]["reads"]["replicon"] = replicon
            self._replicon_dict[replicon]["reads"]["tag_id"] = (
                self._replicon_dict[replicon]["reads"].index + 1).map(
                'tag_{:.0f}'.format)
            self._replicon_dict[replicon]["reads"] = self._replicon_dict[
                replicon]["reads"].loc[:,
                                       ["replicon",
                                        "start",
                                        "end",
                                        "tag_id",
                                        "count",
                                        "strand"]]
            # create blockbuster input folder if it does not exist
            self._blockbuster_input_folder = "{}/blockbuster_input".format(
                    self._output_folder)
            if not exists(self._blockbuster_input_folder):
                makedirs(self._blockbuster_input_folder)
            self._replicon_dict[replicon]["reads"].to_csv(
                "{}/{}_sorted_reads_for_blockbuster.bed".format(
                    self._blockbuster_input_folder, replicon),
                sep='\t', header=False, index=False, encoding='utf-8')
        t_end = time()
        print("Reads converted to bed format in {} seconds.\n".format(
            t_end-t_start), flush=True) 
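Note that this example calls time() bare rather than time.time(), which implies the module was imported as from time import time. Also, the as_completed loop sits outside the with block; that still works because leaving the context manager waits for every future to finish, but iterating inside the block (as sketched below) lets results be consumed as they complete. A stripped-down sketch of the submit/collect/time pattern (the work function is a placeholder, not project code):

import time
from concurrent import futures

def work(x):
    # stand-in for lib.merge_reads: any picklable callable works
    return x * x

def run_parallel(items, max_workers=4):
    t_start = time.time()
    with futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
        future_to_item = {executor.submit(work, x): x for x in items}
        for future in futures.as_completed(future_to_item):
            item = future_to_item[future]
            try:
                print(item, "->", future.result())
            except Exception as exc:
                print("{} generated an exception: {}".format(item, exc))
    print("Finished in {} seconds.".format(time.time() - t_start))

if __name__ == '__main__':
    run_parallel(range(8))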
Example 69
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License 4 votes vote down vote up
def run_deseq2_analysis(self, size_factors, pairwise_replicates):
        count_df = self._peak_df.loc[:, self._exp_lib_list +
                                     self._ctr_lib_list]
        deseq2_runner = DESeq2Runner(count_df)
        result_df, self._size_factors = deseq2_runner.run_deseq2(
            self._exp_lib_list, self._ctr_lib_list, size_factors,
            pairwise_replicates)
        # normalize counts
        self._peak_df[self._lib_names_list] = self._peak_df[
            self._lib_names_list].div(self._size_factors, axis='columns')
        # append DESeq2 output
        self._peak_df = pd.concat([self._peak_df, result_df], axis=1)
        # write initial peaks
        peak_columns = (["replicon",
                         "peak_start",
                         "peak_end",
                         "peak_strand"] +
                        [lib_name for lib_name in self._lib_dict] +
                        ["baseMean",
                         "log2FoldChange",
                         "lfcSE",
                         "stat",
                         "pvalue",
                         "padj"])
        self._peak_df.loc[:, peak_columns].to_csv(
            "{}/initial_peaks.csv".format(self._output_folder),
            sep='\t', na_rep='NA', index=False, encoding='utf-8')
        # filter peaks
        print("* Filtering peaks...", flush=True)
        sig_peak_df = self._filter_peaks(self._peak_df)
        unsig_peak_df = self._peak_df[~self._peak_df.index.isin(
            sig_peak_df.index)]
        # plot peaks
        print("* Plotting initial peaks...", flush=True)
        t_start = time()
        self._plot_initial_peaks(unsig_peak_df.baseMean,
                                 np.power(2.0, unsig_peak_df.log2FoldChange),
                                 sig_peak_df.baseMean,
                                 np.power(2.0, sig_peak_df.log2FoldChange))
        t_end = time()
        print("Plotting took {} seconds.".format(t_end-t_start), flush=True)
        self._peak_df = sig_peak_df 
Example 70
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License 4 votes vote down vote up
def run_analysis_without_replicates(self, size_factors):
        # check if size factors were defined and otherwise calculate them based
        # on DESeq normalization
        if size_factors is None:
            deseq2_runner = DESeq2Runner(self._peak_df[self._lib_names_list])
            self._size_factors = deseq2_runner.calc_size_factors()
        else:
            self._size_factors = size_factors
        # normalize counts
        self._peak_df[self._lib_names_list] = self._peak_df[
            self._lib_names_list].div(self._size_factors, axis='columns')
        # calculate base means for all peaks
        self._peak_df["base_means"] = self._peak_df.loc[
            :, self._lib_names_list].mean(axis=1)
        # calculate fcs for all peaks
        self._peak_df["fold_change"] = (
            self._peak_df.loc[:, self._exp_lib_list].sum(axis=1) /
            self._peak_df.loc[:, self._ctr_lib_list].sum(axis=1))
        # write initial peaks
        peak_columns = (["replicon",
                         "peak_start",
                         "peak_end",
                         "peak_strand"] +
                        [lib_name for lib_name in self._lib_dict] +
                        ["base_means",
                         "fold_change"])
        self._peak_df.loc[:, peak_columns].to_csv(
            "{}/initial_peaks.csv".format(self._output_folder),
            sep='\t', na_rep='NA', index=False, encoding='utf-8')
        # filter peaks
        print("* Filtering peaks...", flush=True)
        sig_peak_df = self._filter_peaks_without_replicates(self._peak_df)
        unsig_peak_df = self._peak_df[~self._peak_df.index.isin(
            sig_peak_df.index)]
        # plot peaks
        print("* Plotting initial peaks...", flush=True)
        t_start = time()
        self._plot_initial_peaks(unsig_peak_df.base_means,
                                 unsig_peak_df.fold_change,
                                 sig_peak_df.base_means,
                                 sig_peak_df.fold_change)
        t_end = time()
        print("Plotting took {} seconds.".format(t_end-t_start), flush=True)
        self._peak_df = sig_peak_df 
Example 71
Project: weibo-login   Author: littlepinecone   File: login.py    GNU General Public License v3.0 4 votes vote down vote up
def getcookies(user, passwd):
    # fetch the CAPTCHA pattern
    sign = random.random()
    url = "https://captcha.weibo.com/api/pattern/get?ver=daf139fb2696a4540b298756bd06266a&source=ssologin&usrname=" + user + "&line=160&side=100&radius=30&_rnd=" + str(
        sign) + "&callback=pl_cb"
    r = requests.get(url)
    imgdata = json.loads(r.text.replace("pl_cb(", '').replace(")", ''))['path_enc']
    id = json.loads(r.text.replace("pl_cb(", '').replace(")", ''))['id']
    recombinePattern(imgdata)
    data_enc = pathdataEncode(path_generate(patterntohash()))
    path_enc = pathEncode(patterntohash(), id)

    url2 = "https://captcha.weibo.com/api/pattern/verify?ver=daf139fb2696a4540b298756bd06266a&id=" + id + "&usrname=" + user + "&source=ssologin&path_enc=" + path_enc + "&data_enc=" + data_enc + "&callback=pl_cb"
    url3 = 'https://passport.weibo.cn/sso/login'
    # required wait before verifying
    time.sleep(1)
    # verify the CAPTCHA
    session = requests.Session()
    r2 = session.get(url2)
    # print(r2.headers)
    print(json.loads(r2.text.replace("pl_cb(", '').replace(")", ''))['msg'])
    # print id

    formdata = {'username': user,
                'password': passwd,
                'savestate': '1',
                'ec': '0',
                'entry': 'mweibo',
                'mainpageflag': '1',
                'vid': id,
                'wentry': '',
                'loginfrom': '',
                'client_id': '',
                'code:qq': '',
                'r': '',
                'pagerefer': '',
                'hff': '',
                'hfp': ''}

    # print(formdata['vid'])
    # log in
    r3 = session.post(url3, data=formdata, headers=headers3)
    cookies_url = r3.headers['Set-Cookie']
    print(json.loads(r3.content)['msg'])
    return {k.split('=')[0]: k.split('=')[1] for k in cookies_url.split(';')}

    # r4 = requests.get('https://m.weibo.cn/')
    # print(r4.headers['Set-Cookie']) 
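The time.sleep(1) above is a fixed pause to give the CAPTCHA service time to register the pattern before it is verified. When a single fixed delay is not reliable enough, a small retry helper built on time.sleep() is a common generalization (a generic sketch, not part of this project):

import time
import random

def retry_with_delay(action, attempts=3, delay=1.0):
    # call `action` until it succeeds, sleeping `delay` seconds between tries
    for attempt in range(1, attempts + 1):
        try:
            return action()
        except Exception as exc:
            if attempt == attempts:
                raise
            print("attempt {} failed ({}); retrying in {}s".format(
                attempt, exc, delay))
            time.sleep(delay)

def flaky():
    # stand-in for a request that sometimes fails
    if random.random() < 0.5:
        raise RuntimeError('service not ready')
    return 'ok'

print(retry_with_delay(flaky))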
Example 72
Project: incubator-spot   Author: apache   File: flow_oa.py    Apache License 2.0 4 votes vote down vote up
def _ingest_summary(self):
        # get date parameters.
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]

        self._logger.info("Getting ingest summary data for the day")
        
        ingest_summary_cols = ["date","total"]		
        result_rows = []        
        df_filtered =  pd.DataFrame()

        # get ingest summary.

        query_to_load=("""
                SELECT tryear, trmonth, trday, trhour, trminute, COUNT(*) as total
                FROM {0}.{1} WHERE y={2} AND m={3} AND d={4}
                AND unix_tstamp IS NOT NULL
                AND sip IS NOT NULL
                AND sport IS NOT NULL
                AND dip IS NOT NULL
                AND dport IS NOT NULL
                AND ibyt IS NOT NULL
                AND ipkt IS NOT NULL
                AND tryear={2}
                AND cast(treceived as timestamp) IS NOT NULL
                GROUP BY tryear, trmonth, trday, trhour, trminute;
        """).format(self._db,self._table_name, yr, mn, dy)
        
        results = impala.execute_query(query_to_load) 
 
        if results:
            df_results = as_pandas(results) 
            
            # Form a new dataframe with one "date" string per raw row
            df_new = pd.DataFrame(
                [["{0}-{1}-{2} {3}:{4}".format(val['tryear'], val['trmonth'],
                                               val['trday'], val['trhour'],
                                               val['trminute']),
                  int(val['total']) if not math.isnan(val['total']) else 0]
                 for key, val in df_results.iterrows()],
                columns=ingest_summary_cols)
            # Group the data by minute

            sf = df_new.groupby(by=['date'])['total'].sum()
            df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
            
            df_final = df_filtered.append(df_per_min, ignore_index=True).to_records(False,False) 
            if len(df_final) > 0:
                query_to_insert=("""
                    INSERT INTO {0}.flow_ingest_summary PARTITION (y={1}, m={2}, d={3}) VALUES {4};
                """).format(self._db, yr, mn, dy, tuple(df_final))

                impala.execute_query(query_to_insert)
                
        else:
            self._logger.info("No data found for the ingest summary") 
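The two commented steps above, building one timestamp string per raw row and then collapsing to one row per minute, reduce to a plain pandas groupby. A toy sketch with made-up data (column names mirror ingest_summary_cols; the values are illustrative):

import pandas as pd

# one "date" string per raw record, as assembled above
df_new = pd.DataFrame([
    ["2019-1-1 10:0", 3],
    ["2019-1-1 10:0", 2],
    ["2019-1-1 10:1", 5],
], columns=["date", "total"])

# group the data by minute and sum the per-record counts
sf = df_new.groupby(by=['date'])['total'].sum()
df_per_min = pd.DataFrame({'date': sf.index, 'total': sf.values})
print(df_per_min)
# two rows: ('2019-1-1 10:0', 5) and ('2019-1-1 10:1', 5)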
Example 73
Project: incubator-spot   Author: apache   File: proxy_oa.py    Apache License 2.0 4 votes vote down vote up
def _ingest_summary(self): 
        # get date parameters.
        yr = self._date[:4]
        mn = self._date[4:6]
        dy = self._date[6:]

        self._logger.info("Getting ingest summary data for the day")
        
        ingest_summary_cols = ["date","total"]		
        result_rows = []        
        df_filtered =  pd.DataFrame()

        # get ingest summary.

        query_to_load=("""
                SELECT p_date, p_time, COUNT(*) as total
                FROM {0}.{1} WHERE y='{2}' AND m='{3}' AND d='{4}'
                AND p_date IS NOT NULL AND p_time IS NOT NULL
                AND clientip IS NOT NULL AND p_time != ''
                AND host IS NOT NULL AND fulluri IS NOT NULL
                GROUP BY p_date, p_time;
        """).format(self._db,self._table_name, yr, mn, dy)
        
        results = impala.execute_query(query_to_load) 
 
        if results:
            df_results = as_pandas(results)
            # Form a new dataframe with one "date" string per raw row
            df_new = pd.DataFrame(
                [["{0} {1}:{2}".format(val['p_date'],
                                       val['p_time'].split(":")[0].zfill(2),
                                       val['p_time'].split(":")[1].zfill(2)),
                  int(val['total']) if not math.isnan(val['total']) else 0]
                 for key, val in df_results.iterrows()],
                columns=ingest_summary_cols)
            # Group the data by minute
            sf = df_new.groupby(by=['date'])['total'].sum()
            df_per_min = pd.DataFrame({'date':sf.index, 'total':sf.values})
            
            df_final = df_filtered.append(df_per_min, ignore_index=True).to_records(False,False) 
            if len(df_final) > 0:
                query_to_insert=("""
                    INSERT INTO {0}.proxy_ingest_summary PARTITION (y={1}, m={2}, d={3}) VALUES {4};
                """).format(self._db, yr, mn, dy, tuple(df_final))

                impala.execute_query(query_to_insert) 
                
        else:
            self._logger.info("No data found for the ingest summary") 
Example 74
Project: Wide-Residual-Nets-for-SETI   Author: sgrvinod   File: test.py    Apache License 2.0 4 votes vote down vote up
def test(test_loader, model):
    """
    Perform testing.
    """

    model.eval()  # eval mode

    all_probs = []
    all_uuids = []

    batch_time = AverageMeter()  # forward prop. time this batch

    start = time.time()

    softmax = torch.nn.Softmax()  # need this, since there is no longer a loss layer

    for i, (input, uuids) in enumerate(test_loader):

        softmax.zero_grad()

        # Store UUIDs associated with this batch, in the right order
        uuids = list(uuids.numpy().ravel())
        all_uuids.extend(uuids)

        input_var = torch.autograd.Variable(input, volatile=True).cuda()

        output = model(input_var)
        probs = softmax(output)
        all_probs.append(probs.data)

        batch_time.update(time.time() - start)
        start = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(i, len(test_loader),
                                                                                    batch_time=batch_time))
    all_probs = torch.cat(all_probs).cpu()  # concatenate probs from all batches, move to CPU
    all_uuids = [uuid_index_mapping[i] for i in all_uuids]  # convert UUID indices to UUIDs

    # Create dataframe and store as CSV
    df1 = pd.DataFrame({'UUIDs': pd.Series(all_uuids)})
    df2 = pd.DataFrame(all_probs.numpy())
    df = pd.concat([df1, df2], axis=1)
    csv_path = './TESTRESULTS__' + args.checkpoint.split('/')[-1] + '__' + args.h5data.split('/')[-1] + '.csv'
    df.to_csv(csv_path, header=False, index=False)
    print("\nSaved results to {0}\n".format(csv_path)) 
Example 75
Project: Wide-Residual-Nets-for-SETI   Author: sgrvinod   File: test_cpu.py    Apache License 2.0 4 votes vote down vote up
def test(test_loader, model):
    """
    Perform testing.
    """

    print('Perform testing')

    model.eval()  # eval mode

    all_probs = []
    all_uuids = []

    batch_time = AverageMeter()  # forward prop. time this batch

    start = time.time()

    softmax = torch.nn.Softmax()  # need this, since there is no longer a loss layer

    for i, (input, uuids) in enumerate(test_loader):

        softmax.zero_grad()

        # Store UUIDs associated with this batch, in the right order
        uuids = list(uuids.numpy().ravel())
        all_uuids.extend(uuids)

        input_var = torch.autograd.Variable(input, volatile=True).cpu()

        output = model(input_var)
        probs = softmax(output)
        
        all_probs.append(probs.data)

        batch_time.update(time.time() - start)
        start = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'.format(i, len(test_loader),
                                                                                    batch_time=batch_time))
    all_probs = torch.cat(all_probs).cpu()  # concatenate probs from all batches, move to CPU
    all_uuids = [uuid_index_mapping[i] for i in all_uuids]  # convert UUID indices to UUIDs

    # Create dataframe and store as CSV
    df1 = pd.DataFrame({'UUIDs': pd.Series(all_uuids)})
    df2 = pd.DataFrame(all_probs.numpy())
    df = pd.concat([df1, df2], axis=1)
    csv_path = './TESTRESULTS__' + args.checkpoint.split('/')[-1] + '__' + args.h5data.split('/')[-1] + '.csv'
    df.to_csv(csv_path, header=False, index=False)
    print("\nSaved results to {0}\n".format(csv_path)) 
Example 76
Project: Wide-Residual-Nets-for-SETI   Author: sgrvinod   File: train.py    Apache License 2.0 4 votes vote down vote up
def train(train_loader, model, criterion, optimizer, epoch):
    """
    Perform one epoch's training.
    """

    batch_time = AverageMeter()  # forward prop. + gradient descent time this batch
    data_time = AverageMeter()  # data loading time this batch
    losses = AverageMeter()  # loss this batch
    top1 = AverageMeter()  # (top1) accuracy this batch

    model.train()  # train mode

    start = time.time()

    for i, (input, target) in enumerate(train_loader):
        data_time.update(time.time() - start)

        input_var = torch.autograd.Variable(input).cuda()
        target = target.cuda(async=True)
        target_var = torch.autograd.Variable(target)

        output = model(input_var)
        loss = criterion(output, target_var)

        acc = accuracy(output.data, target)
        losses.update(loss.data[0], input.size(0))
        top1.update(acc, input.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - start)
        start = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {top1.val:.3f} ({top1.avg:.3f})'.format(
                epoch, i, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1)) 
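Because start is reset at the bottom of each iteration, the two meters measure different things: data_time is updated immediately after the loader yields a batch, so it captures only the time spent waiting for data, while batch_time is updated after the optimizer step and so captures the full iteration. The same two-timer pattern in isolation (sleeps stand in for I/O and compute; no PyTorch required):

import time
import random

start = time.time()
for step in range(3):
    time.sleep(random.uniform(0.01, 0.03))   # stand-in for data loading
    data_time = time.time() - start          # data-loading time only
    time.sleep(random.uniform(0.05, 0.08))   # stand-in for forward/backward
    batch_time = time.time() - start         # full iteration time
    start = time.time()                      # reset for the next iteration
    print("step {}: data {:.3f}s, batch {:.3f}s".format(
        step, data_time, batch_time))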
Example 77
Project: Wide-Residual-Nets-for-SETI   Author: sgrvinod   File: train.py    Apache License 2.0 4 votes vote down vote up
def validate(val_loader, model, criterion):
    """
    Perform validation after each training cycle.

    Returns:
        top1.avg (float): Average accuracy on the validation data
    """

    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    model.eval()  # eval mode

    start = time.time()

    for i, (input, target) in enumerate(val_loader):

        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input, volatile=True).cuda()
        target_var = torch.autograd.Variable(target, volatile=True).cuda()

        output = model(input_var)
        loss = criterion(output, target_var)

        acc = accuracy(output.data, target)
        losses.update(loss.data[0], input.size(0))
        top1.update(acc, input.size(0))

        batch_time.update(time.time() - start)
        start = time.time()

        if i % args.print_freq == 0:
            print('Validation: [{0}/{1}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Accuracy {top1.val:.3f} ({top1.avg:.3f})'.format(
                i, len(val_loader), batch_time=batch_time, loss=losses,
                top1=top1))

    print('\n * Accuracy {top1.avg:.3f}\n'
          .format(top1=top1))

    return top1.avg 
Example 78
Project: kicker-module   Author: EvanTheB   File: front.py    GNU General Public License v3.0 4 votes vote down vote up
def get_heuristic(self, ladder, players, games, command):

        close_game = heuristics.DrawChanceHeuristic(ladder)

        linear_10 = heuristics.linear_clamped_function(0., 0., 10., 1.)
        disrupt = heuristics.LadderDisruptionHeuristic(
            ladder, players, games, linear_10)

        linear_3_10 = heuristics.linear_clamped_function(
            3. * 4., 1., 10. * 4., 0.)
        close_skills = heuristics.TrueskillClumpingHeuristic(
            ladder.process(players, games), linear_3_10)

        linear_week_month = heuristics.linear_clamped_function(
            time.time() - 7. * 24. * 60. * 60., 0.,
            time.time() - 30. * 24. * 60. * 60., 1.)
        playmore = heuristics.TimeSinceLastPlayedHeuristic(
            players, games, linear_week_month)

        linear_0_30 = heuristics.linear_clamped_function(0., 1., 30., 0.)
        variety = heuristics.UnplayedMatchupsHeuristic(
            players, games, linear_0_30)

        lin_heur = [
            (1.5, close_game),
            (1., close_skills),
            (0.5, variety),
        ]
        default = heuristics.LinearSumHeuristic(lin_heur)
        if command == "fast":
            return default

        slow_lin_heur = [
            # (1., close_game),
            (0.5, close_skills),
            (2., disrupt),
            (0.5, variety),
            (0.5, playmore),
        ]
        slow = heuristics.LinearSumHeuristic(slow_lin_heur)
        if command == "slow":
            return slow 
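heuristics.linear_clamped_function(x0, y0, x1, y1) is a project helper; from its use here it appears to return a function that interpolates linearly between (x0, y0) and (x1, y1) and clamps to the endpoint values outside that range, so a game last played within a week scores 0 and one untouched for a month scores 1. A plausible stand-alone sketch under that assumption (not the project's code):

import time

def linear_clamped_function(x0, y0, x1, y1):
    # line through (x0, y0) and (x1, y1), clamped outside the segment
    def f(x):
        t = (x - x0) / (x1 - x0)
        t = max(0.0, min(1.0, t))
        return y0 + t * (y1 - y0)
    return f

now = time.time()
week_ago = now - 7. * 24. * 60. * 60.
month_ago = now - 30. * 24. * 60. * 60.
weight = linear_clamped_function(week_ago, 0., month_ago, 1.)
print(weight(now))        # 0.0: played within the last week
print(weight(month_ago))  # 1.0: not played for a month or more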
Example 79
Project: BlueLightMeter   Author: chripell   File: nordic_uart.py    Apache License 2.0 4 votes vote down vote up
def __init__(self):
        hci_path = self.BLUEZ_PATH + '/' + args.hci_interface
        bus = dbus.SystemBus(self.BLUEZ)
        hci = bus.get_object(self.BLUEZ, hci_path)
        self.blm_ = None
        checked = []
        hci.StartDiscovery(dbus_interface='org.bluez.Adapter1')
        if args.mac_address is None:
            print('Scanning for', args.name)
            start = time.time()
            while time.time() < start + args.timeout and not self.blm_:
                root = ET.fromstring(hci.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable'))
                time.sleep(1)
                for ch in root:
                    if ch.tag == 'node':
                        dname = ch.attrib['name']
                        if dname and dname not in checked:
                            checked.append(dname)
                            path = hci_path +'/' + dname
                            dev = bus.get_object(self.BLUEZ, path)
                            devp = dbus.Interface(dev, 'org.freedesktop.DBus.Properties')
                            name = devp.Get('org.bluez.Device1', 'Name')
                            if name == args.name:
                                print('Found: %s' % dname[4:].replace('_', ':'))
                                self.path_ = path
                                self.blm_ = dev
                                self.blmp_ = devp
                                self.blmd_ = dbus.Interface(dev, 'org.bluez.Device1')
                                break
        else:
            print('Connecting to MAC %s' % args.mac_address)
            path = hci_path +'/dev_' + args.mac_address.replace(':', '_')
            self.blm_ = bus.get_object(self.BLUEZ, path)
            self.blmp_ = dbus.Interface(self.blm_, 'org.freedesktop.DBus.Properties')
            self.blmd_ = dbus.Interface(self.blm_, 'org.bluez.Device1')
            
        self.blmd_.Connect()
        self.connected_ = False
        start = time.time()
        while time.time() < start + args.timeout and not self.connected_:
            self.connected_ = self.prop_get('Connected')
        if not self.connected_:
            print('Failed to connect')
            self.blmd_.Disconnect()
            return
        print('Connected')

        gatt_read_path = self.path_ + '/service000b/char000c'
        gatt_read = bus.get_object(self.BLUEZ, gatt_read_path)
        self.gatt_read_ = dbus.Interface(gatt_read, 'org.bluez.GattCharacteristic1')
        self.gatt_read_.StartNotify()

        gatt_write_path = self.path_ + '/service000b/char000f'
        gatt_write = bus.get_object(self.BLUEZ, gatt_write_path)
        self.gatt_write_ = dbus.Interface(gatt_write, 'org.bluez.GattCharacteristic1') 
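Both BlueLightMeter clients use the same poll-until-deadline idiom: while time.time() < start + args.timeout and not <condition>, with a time.sleep() inside to avoid spinning. Factored into a reusable helper, the pattern looks like this (a generic sketch, not project code):

import time

def wait_for(predicate, timeout, interval=0.1):
    # poll `predicate` until it is truthy or `timeout` seconds pass
    deadline = time.time() + timeout
    result = predicate()
    while not result and time.time() < deadline:
        time.sleep(interval)  # yield instead of busy-looping
        result = predicate()
    return result

# illustrative use: give a connection flag two seconds to flip
state = {'connected': False}
print(wait_for(lambda: state['connected'], timeout=2.0))  # False after ~2 s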
Example 80
Project: BlueLightMeter   Author: chripell   File: blm_client.py    Apache License 2.0 4 votes vote down vote up
def __init__(self):
        hci_path = self.BLUEZ_PATH + '/' + args.hci_interface
        bus = dbus.SystemBus(self.BLUEZ)
        hci = bus.get_object(self.BLUEZ, hci_path)

        self.blm_ = None
        checked = []
        hci.StartDiscovery(dbus_interface='org.bluez.Adapter1')
        if args.mac_address is None:
            print('Scanning for BlueLightMeter')
            start = time.time()
            while time.time() < start + args.timeout and not self.blm_:
                root = ET.fromstring(hci.Introspect(dbus_interface='org.freedesktop.DBus.Introspectable'))
                time.sleep(1)
                for ch in root:
                    if ch.tag == 'node':
                        dname = ch.attrib['name']
                        if dname and dname not in checked:
                            checked.append(dname)
                            path = hci_path +'/' + dname
                            dev = bus.get_object(self.BLUEZ, path)
                            devp = dbus.Interface(dev, 'org.freedesktop.DBus.Properties')
                            name = devp.Get('org.bluez.Device1', 'Name')
                            if name == args.name:
                                print('Found: %s' % dname[4:].replace('_', ':'))
                                self.path_ = path
                                self.blm_ = dev
                                self.blmp_ = devp
                                self.blmd_ = dbus.Interface(dev, 'org.bluez.Device1')
                                break
        else:
            print('Connecting to BlueLightMeter with MAC %s' % args.mac_address)
            path = hci_path +'/dev_' + args.mac_address.replace(':', '_')
            self.blm_ = bus.get_object(self.BLUEZ, path)
            self.blmp_ = dbus.Interface(self.blm_, 'org.freedesktop.DBus.Properties')
            self.blmd_ = dbus.Interface(self.blm_, 'org.bluez.Device1')

        self.blmd_.Connect()
        self.connected_ = False
        start = time.time()
        while time.time() < start + args.timeout and not self.connected_:
            self.connected_ = self.prop_get('Connected')
        if not self.connected_:
            print('Failed to connect')
            self.blmd_.Disconnect()
            return
        print('Connected')
        time.sleep(3)

        gatt_read_path = self.path_ + '/service000c/char000d'
        gatt_read = bus.get_object(self.BLUEZ, gatt_read_path)
        self.gatt_read_ = dbus.Interface(gatt_read, 'org.bluez.GattCharacteristic1')

        gatt_write_path = self.path_ + '/service000c/char0010'
        gatt_write = bus.get_object(self.BLUEZ, gatt_write_path)
        self.gatt_write_ = dbus.Interface(gatt_write, 'org.bluez.GattCharacteristic1')