Python timeit.default_timer() Examples

The following are 30 code examples of timeit.default_timer(), drawn from open-source projects. Each example notes its original project and source file. You may also want to check out the other available functions and classes of the timeit module.
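Before the examples, here is a minimal sketch of the pattern nearly all of them share: read timeit.default_timer() before and after a block of work and take the difference. The work() function is a hypothetical stand-in for the code being measured. On Python 3.3+, timeit.default_timer is an alias for time.perf_counter, a monotonic, high-resolution clock suited to interval measurement.

import timeit

def work():
    # hypothetical stand-in for the code being timed
    return sum(i * i for i in range(1000000))

start = timeit.default_timer()
work()
elapsed = timeit.default_timer() - start
print("work() took {:.4f} seconds".format(elapsed))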
Example #1
Source File: poolImprovement.py    From Learning-Concurrency-in-Python with MIT License
def main():

    t1 = timeit.default_timer()
    with ProcessPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
    print("{} Seconds Needed for ProcessPoolExecutor".format(timeit.default_timer() - t1))

    t2 = timeit.default_timer()
    with ThreadPoolExecutor(max_workers=4) as executor:
        for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
            print('%d is prime: %s' % (number, prime))
    print("{} Seconds Needed for ThreadPoolExecutor".format(timeit.default_timer() - t2))

    t3 = timeit.default_timer()
    for number in PRIMES:
        isPrime = is_prime(number)
        print("{} is prime: {}".format(number, isPrime))
    print("{} Seconds needed for single threaded execution".format(timeit.default_timer() - t3)) 
Example #2
Source File: v2_validation.py    From Attentive-Filtering-Network with MIT License
def best_eer(val_scores, utt2len, utt2label, key_list):
    
    def f_neg(threshold):
        ## Scipy tries to minimize the function
        return utt_eer(val_scores, utt2len, utt2label, key_list, threshold)
    
    # Initialization of best threshold search
    thr_0 = [0.20] * 1 # binary class
    constraints = [(0.,1.)] * 1 # binary class
    def bounds(**kwargs):
        x = kwargs["x_new"]
        tmax = bool(np.all(x <= 1))
        tmin = bool(np.all(x >= 0))
        return tmax and tmin

    # Search using L-BFGS-B, the epsilon step must be big otherwise there is no gradient
    minimizer_kwargs = {"method": "L-BFGS-B",
                        "bounds":constraints,
                        "options":{
                            "eps": 0.05
                            }
                       }

    # We combine L-BFGS-B with Basinhopping for stochastic search with random steps
    logger.info("===> Searching optimal threshold for each label")
    start_time = timer()

    opt_output = basinhopping(f_neg, thr_0,
                                stepsize = 0.1,
                                minimizer_kwargs=minimizer_kwargs,
                                niter=10,
                                accept_test=bounds)

    end_time = timer()
    logger.info("===> Optimal threshold for each label:\n{}".format(opt_output.x))
    logger.info("Threshold found in: %s seconds" % (end_time - start_time))

    score = opt_output.fun
    return score, opt_output.x 
Example #3
Source File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    From L.E.S.M.A with Apache License 2.0
def run(self):
		request = self.request
		try:
			if ((timeit.default_timer() - self.starttime) <= self.timeout and
					not SHUTDOWN_EVENT.isSet()):
				try:
					f = urlopen(request)
				except TypeError:
					# PY24 expects a string or buffer
					# This also causes issues with Ctrl-C, but we will concede
					# for the moment that Ctrl-C on PY24 isn't immediate
					request = build_request(self.request.get_full_url(),
											data=request.data.read(self.size))
					f = urlopen(request)
				f.read(11)
				f.close()
				self.result = sum(self.request.data.total)
			else:
				self.result = 0
		except (IOError, SpeedtestUploadTimeout):
			self.result = sum(self.request.data.total) 
Example #4
Source File: scf.py    From pyscf with Apache License 2.0
def get_k(self, dm=None, **kw):
    '''Compute K matrix for the given density matrix.'''
    from pyscf.nao.m_kmat_den import kmat_den
    if dm is None: dm = self.make_rdm1()
    
    if False:
      print(__name__, ' get_k: self.kmat_algo ', self.kmat_algo, dm.shape)
      if len(dm.shape)==5:
        print(__name__, 'nelec dm', (dm[0,:,:,:,0]*self.overlap_lil().toarray()).sum())
      elif len(dm.shape)==2 or len(dm.shape)==3:
        print(__name__, 'nelec dm', (dm*self.overlap_lil().toarray()).sum())
      else:
        print(__name__, dm.shape)
    
    kmat_algo = kw['kmat_algo'] if 'kmat_algo' in kw else self.kmat_algo

    #if self.verbosity>1: print(__name__, "\t\t====> Matrix elements of Fock exchange operator will be calculated by using '{}' algorithm.\f".format(kmat_algo))
    if self.kmat_timing is not None: t1 = timer()
    kmat = kmat_den(self, dm=dm, algo=kmat_algo, **kw)
    if self.kmat_timing is not None: self.kmat_timing += timer()-t1
    return kmat 
Example #5
Source File: stac_validator.py    From stac-validator with Apache License 2.0
def main():
    args = docopt(__doc__)
    follow = args.get("--follow")
    stac_file = args.get("<stac_file>")
    stac_spec_dirs = args.get("--spec_dirs", None)
    version = args.get("--version")
    verbose = args.get("--verbose")
    nthreads = args.get("--threads", 10)
    timer = args.get("--timer")
    log_level = args.get("--log_level", "CRITICAL")

    if timer:
        start = default_timer()

    stac = StacValidate(stac_file, stac_spec_dirs, version, log_level, follow)
    _ = stac.run(nthreads)
    shutil.rmtree(stac.dirpath)

    if verbose:
        print(json.dumps(stac.message, indent=4))
    else:
        print(json.dumps(stac.status, indent=4))

    if timer:
        print(f"Validator took {default_timer() - start:.2f} seconds") 
Example #6
Source File: test_edgeembeds.py    From EvalNE with MIT License
def time_test():

    # Create a dictionary simulating the node embeddings
    keys = map(str, range(100))
    vals = np.random.randn(100, 10)
    d = dict(zip(keys, vals))

    # Create set of edges
    num_edges = 1000000
    edges = list(zip(np.random.randint(0, 100, num_edges), np.random.randint(0, 100, num_edges)))

    start = timeit.default_timer()
    res = edge_embeddings.compute_edge_embeddings(d, edges, "average")
    end = timeit.default_timer() - start

    print("Processed in: {}".format(end)) 
Example #7
Source File: utilities.py    From pytim with GNU General Public License v3.0
def lap(show=False):
    """ Timer function

        :param bool show: (optional) print timer information to stderr
    """

    if not hasattr(lap, "tic"):
        lap.tic = timer()
    else:
        toc = timer()
        dt = toc - lap.tic
        lap.tic = toc
        if show:
            stderr.write("LAP >>> " + str(dt) + "\n")
        return dt 
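A brief usage note: the first call to lap() only arms the timer (and returns None); each later call returns the seconds elapsed since the previous call. The do_something() below is a hypothetical placeholder.

lap()                  # arms the timer, returns None
do_something()
dt = lap(show=True)    # seconds since the previous call, also written to stderr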
Example #8
Source File: m_prod_basis_obsolete.py    From pyscf with Apache License 2.0
def init_prod_basis_pp(self, sv, **kvargs):
    """ Talman's procedure should be working well with Pseudo-Potential starting point."""
    from pyscf.nao.m_prod_biloc import prod_biloc_c

    #t1 = timer()    
    self.init_inp_param_prod_log_dp(sv, **kvargs)
    data = self.chain_data()
    libnao.init_vrtx_cc_apair(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)))
    self.sv_pbloc_data = True
    
    #t2 = timer(); print(t2-t1); t1=timer();
    self.bp2info = [] # going to be some information including indices of atoms, list of contributing centres, conversion coefficients
    for ia1 in range(sv.natoms):
      rc1 = sv.ao_log.sp2rcut[sv.atom2sp[ia1]]
      for ia2 in range(ia1+1,sv.natoms):
        rc2,dist = sv.ao_log.sp2rcut[sv.atom2sp[ia2]], sqrt(((sv.atom2coord[ia1]-sv.atom2coord[ia2])**2).sum())
        if dist>rc1+rc2 : continue
        pbiloc = self.comp_apair_pp_libint(ia1,ia2)
        if pbiloc is not None : self.bp2info.append(pbiloc)
    
    self.dpc2s,self.dpc2t,self.dpc2sp = self.init_c2s_domiprod() # dominant product's counting
    self.npdp = self.dpc2s[-1]
    self.norbs = self.sv.norbs
    return self 
Example #9
Source File: m_log_interp.py    From pyscf with Apache License 2.0
def interp_rcut(self, ff, rr, rcut=None):
    """ Interpolation of vector data ff[...,:] and vector arguments rr[:] """
    assert ff.shape[-1]==self.nr
    ffa = ff.reshape(ff.size//self.nr, self.nr)
    if rcut is None: rcut = self.gg[-1]
    rra = rr.reshape(-1) if type(rr)==np.ndarray else np.array([rr])
    
    #t0 = timer()
    r2l,r2k,ir2cc = self.coeffs_rcut(rra, rcut)
    #t1 = timer()
    fr2v = np.zeros(ffa.shape[0:-1]+rra.shape[:])
    #print(__name__, fr2v.shape, fr2v[:,r2l[0]].shape, r2l[0].shape)
    #print(__name__, 'ff ', type(ff))
    for j in range(6): fr2v[:,r2l[0]]+= ffa[:,r2k+j]*ir2cc[j]
    #t2 = timer()
    #print(__name__, 'times: ', t1-t0, t2-t1)
    return fr2v.reshape((ff.shape[0:-1]+rr.shape[:])) 
Example #10
Source File: v1.py    From terraform-templates with Apache License 2.0
def _wall_time(x):
    from functools import wraps
    from timeit import default_timer

    @wraps(x)
    def wrapper(self, *args, **kwargs):
        start = default_timer()
        r = x(self, *args, **kwargs)
        end = default_timer()

        secs = end-start
        r.wall_time = secs

        time_str = 'wall time %.2f seconds' % secs
        if logging.getLogger(__name__).getEffectiveLevel() == DEBUG1:
            self._log(DEBUG1, '%s() %s' %
                      (x.__name__, time_str))
        elif (logging.getLogger(__name__).getEffectiveLevel() in
              [DEBUG2, DEBUG3]):
            self._log(DEBUG2, '%s(%s, %s) %s' %
                      (x.__name__, args, kwargs, time_str))
        return r

    return wrapper 
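As a follow-up to Example #10, here is a self-contained sketch of the same decorator pattern for a plain function, with hypothetical names (timed, slow_add); it drops the logging-level logic of the original and simply prints.

from functools import wraps
from timeit import default_timer

def timed(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        start = default_timer()
        result = fn(*args, **kwargs)
        print('%s() wall time %.2f seconds' % (fn.__name__, default_timer() - start))
        return result
    return wrapper

@timed
def slow_add(a, b):
    return a + b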
Example #11
Source File: parametric_GP.py    From ParametricGP with MIT License
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))
        
        X_tf = tf.placeholder(tf.float64)
        y_tf = tf.placeholder(tf.float64)
        hyp_tf = tf.Variable(self.hyp, dtype=tf.float64)
        
        train = self.likelihood(hyp_tf, X_tf, y_tf)
        
        init = tf.global_variables_initializer()
        self.sess.run(init)
        
        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            X_batch, y_batch = fetch_minibatch(self.X,self.y,self.N_batch)
            self.sess.run(train, {X_tf:X_batch, y_tf:y_batch})
            
            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                nlml = self.sess.run(self.nlml)
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        self.hyp = self.sess.run(hyp_tf) 
Example #12
Source File: m_rf0_den.py    From pyscf with Apache License 2.0
def rf0_cmplx_ref(self, ww):
  """ Full matrix response in the basis of atom-centered product functions """
  rf0 = np.zeros((len(ww), self.nprod, self.nprod), dtype=self.dtypeComplex)
  v = self.pb.get_ac_vertex_array()
  
  t1 = timer()
  if self.verbosity>1: print(__name__, 'self.ksn2e', self.ksn2e, len(ww))
      
  zvxx_a = zeros((len(ww), self.nprod), dtype=self.dtypeComplex)
  for s in range(self.nspin):
    n2e = self.ksn2e[0,s,:]
    n2f = self.ksn2f[0,s,:]
    n2x = self.x[s,:,:]
    for en,fn,xn in zip(n2e,n2f,n2x):
      vx = dot(v, xn)
      for em,fm,xm in zip(n2e,n2f,n2x):
        vxx_a = dot(vx, xm.T)
        for iw,comega in enumerate(ww):
          zvxx_a[iw,:] = vxx_a * (fn - fm)/ (comega - (em - en))
        rf0 += einsum('wa,b->wab', zvxx_a, vxx_a)
  
  t2 = timer()
  if self.verbosity>0: print(__name__, 'rf0_ref_loop', t2-t1)
  return rf0 
Example #13
Source File: parametric_GP.py    From ParametricGP with MIT License
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.y_batch = fetch_minibatch(self.X,self.y,self.N_batch) 
            
            # Compute likelihood and gradients 
            nlml, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        nlml, D_NLML = NLML(self.hyp) 
Example #14
Source File: test_0202_log_interp_vv.py    From pyscf with Apache License 2.0
def test_log_interp_vv_speed(self):
    """ Test the interpolation facility for an array arguments from the class log_interp_c """
    rr,pp = funct_log_mesh(1024, 0.01, 200.0)
    lgi = log_interp_c(rr)

    gcs = np.array([1.2030, 3.2030, 0.7, 10.0, 5.3])
    ff = np.array([[np.exp(-gc*r**2) for r in rr] for gc in gcs])

    rr = np.linspace(0.05, 250.0, 2000000)
    t1 = timer()
    fr2yy = lgi.interp_rcut(ff, rr, rcut=16.0)
    t2 = timer()
    #print(__name__, 't2-t1: ', t2-t1)
    yyref = np.exp(-(gcs.reshape(gcs.size,1)) * (rr.reshape(1,rr.size)**2))
      
    self.assertTrue(np.allclose(fr2yy, yyref) ) 
Example #15
Source File: test_0031_rsh_vec.py    From pyscf with Apache License 2.0
def test_rsh_vec(self):
    """ Compute real spherical harmonics via a vectorized algorithm """
    from pyscf.nao.m_rsphar_libnao import rsphar_exp_vec as rsphar_exp_vec_libnao
    from pyscf.nao.m_rsphar_vec import rsphar_vec as rsphar_vec_python
    from timeit import default_timer as timer
    
    ll = [0,1,2,3,4]
    crds = np.random.rand(20000, 3)
    for lmax in ll:
      t1 = timer()
      rsh1 = rsphar_exp_vec_libnao(crds.T, lmax)
      t2 = timer(); tlibnao = (t2-t1); t1 = timer()
      
      rsh2 = rsphar_vec_python(crds, lmax)
      t2 = timer(); tpython = (t2-t1)
      
      #print( abs(rsh1.T-rsh2).sum(), tlibnao, tpython)
      #print( rsh1[1,:])
      #print( rsh2[1,:]) 
Example #16
Source File: test_0203_log_interp_speed_ram.py    From pyscf with Apache License 2.0
def test_log_interp_vv_speed_and_space(self):
    """ Test the interpolation facility for an array arguments from the class log_interp_c """
    rr,pp = funct_log_mesh(1024, 0.01, 200.0)
    lgi = log_interp_c(rr)

    gcs = np.array([1.2030, 3.2030, 0.7, 10.0, 5.3])
    ff = np.array([[np.exp(-gc*r**2) for r in rr] for gc in gcs])

    rrs = np.linspace(0.05, 250.0, 2000000)
    t1 = timer()
    fr2yy1 = lgi.interp_csr(ff, rrs, rcut=16.0)
    t2 = timer()
    
    #print(__name__, 't1: ', t2-t1)
    #print(fr2yy1.shape, fr2yy1.size)
    yyref = np.exp(-(gcs.reshape(gcs.size,1)) * (rrs.reshape(1,rrs.size)**2))
  
    self.assertTrue(np.allclose(fr2yy1.toarray(), yyref) ) 
Example #17
Source File: test_0205_matelem_ram.py    From pyscf with Apache License 2.0
def test_matelem_speed(self):
    """ Test the computation of atomic orbitals in coordinate space """
    
    dname = os.path.dirname(os.path.abspath(__file__))
    mf = mf_c(verbosity=0, label='water', cd=dname, gen_pb=False, force_gamma=True, Ecut=50)
    g = mf.mesh3d.get_3dgrid()
    t0 = timer()
    vna = mf.vna(g.coords)
    t1 = timer()
    ab2v1 = mf.matelem_int3d_coo(g, vna)
    t2 = timer()
    ab2v2 = mf.matelem_int3d_coo_ref(g, vna)
    t3 = timer()
    #print(__name__, 't1 t2: ', t1-t0, t2-t1, t3-t2)
    #print(abs(ab2v1.toarray()-ab2v2.toarray()).sum()/ab2v2.size, (abs(ab2v1.toarray()-ab2v2.toarray()).max()))
        
    self.assertTrue(np.allclose(ab2v1.toarray(), ab2v2.toarray())) 
Example #18
Source File: test_0204_ao_eval_speed.py    From pyscf with Apache License 2.0
def test_ao_eval_speed(self):
    """ Test the computation of atomic orbitals in coordinate space """
    
    dname = os.path.dirname(os.path.abspath(__file__))
    mf = mf_c(verbosity=0, label='water', cd=dname, gen_pb=False, force_gamma=True, Ecut=20)
    g = mf.mesh3d.get_3dgrid()
    t0 = timer()
    oc2v1 = mf.comp_aos_den(g.coords)
    t1 = timer()
    oc2v2 = mf.comp_aos_py(g.coords)
    t2 = timer()
    
    print(__name__, 't1 t2: ', t1-t0, t2-t1)
    
    print(abs(oc2v1-oc2v2).sum()/oc2v2.size, (abs(oc2v1-oc2v2).max()))
        
    self.assertTrue(np.allclose(oc2v1, oc2v2, atol=3.5e-5)) 
Example #19
Source File: test_0078_vhartree_pbc_water.py    From pyscf with Apache License 2.0
def test_0078_vhartree_pbc_water(self):
    """ Test Hartree potential on equidistant grid with Periodic Boundary Conditions """
    import os
    dname = os.path.dirname(os.path.abspath(__file__))
    mf = mf_c(label='water', cd=dname, gen_pb=False, Ecut=100.0)
    d = abs(np.dot(mf.ucell_mom(), mf.ucell)-(2*np.pi)*np.eye(3)).sum()
    self.assertTrue(d<1e-15)
    g = mf.mesh3d.get_3dgrid()
    dens = mf.dens_elec(g.coords, mf.make_rdm1()).reshape(mf.mesh3d.shape)
    ts = timer()
    vh = mf.vhartree_pbc(dens)
    tf = timer()
    #print(__name__, tf-ts)
    E_Hartree = 0.5*(vh*dens*g.weights).sum()*HARTREE2EV
    self.assertAlmostEqual(E_Hartree, 382.8718239023864)
    # siesta:       Hartree =     382.890331 
Example #20
Source File: prod_basis.py    From pyscf with Apache License 2.0
def init_prod_basis_pp(self, sv, **kvargs):
    """ Talman's procedure should be working well with Pseudo-Potential starting point."""
    from pyscf.nao.m_prod_biloc import prod_biloc_c

    #t1 = timer()    
    self.init_inp_param_prod_log_dp(sv, **kvargs)
    data = self.chain_data()
    libnao.init_vrtx_cc_apair(data.ctypes.data_as(POINTER(c_double)), c_int64(len(data)))
    self.sv_pbloc_data = True
    
    #t2 = timer(); print(t2-t1); t1=timer();
    self.bp2info = [] # going to be some information including indices of atoms, list of contributing centres, conversion coefficients
    for ia1 in range(sv.natoms):
      rc1 = sv.ao_log.sp2rcut[sv.atom2sp[ia1]]
      for ia2 in range(ia1+1,sv.natoms):
        rc2,dist = sv.ao_log.sp2rcut[sv.atom2sp[ia2]], sqrt(((sv.atom2coord[ia1]-sv.atom2coord[ia2])**2).sum())
        if dist>rc1+rc2 : continue
        pbiloc = self.comp_apair_pp_libint(ia1,ia2)
        if pbiloc is not None : self.bp2info.append(pbiloc)
    
    self.dpc2s,self.dpc2t,self.dpc2sp = self.init_c2s_domiprod() # dominant product's counting
    self.npdp = self.dpc2s[-1]
    self.norbs = self.sv.norbs
    return self 
Example #21
Source File: utils.py    From spectacles with MIT License
def log_duration(fn: Callable):
    @functools.wraps(fn)
    def timed_function(*args, **kwargs):
        start_time = timeit.default_timer()
        try:
            result = fn(*args, **kwargs)
        finally:
            elapsed = timeit.default_timer() - start_time
            elapsed_str = human_readable(elapsed)
            message_detail = get_detail(fn.__name__)

            logger.info(f"Completed {message_detail}validation in {elapsed_str}.\n")
        return result

    return timed_function 
Example #22
Source File: test_framework.py    From python-test-framework with MIT License
def _timed_block_factory(opening_text):
    from timeit import default_timer as timer
    from traceback import format_exception
    from sys import exc_info

    def _timed_block_decorator(s, before=None, after=None):
        display(opening_text, s)

        def wrapper(func):
            if callable(before):
                before()
            time = timer()
            try:
                func()
            except AssertionError as e:
                display('FAILED', str(e))
            except Exception:
                fail('Unexpected exception raised')
                tb_str = ''.join(format_exception(*exc_info()))
                display('ERROR', tb_str)
            display('COMPLETEDIN', '{:.2f}'.format((timer() - time) * 1000))
            if callable(after):
                after()
        return wrapper
    return _timed_block_decorator 
Example #23
Source File: trade.py    From ConvLab with MIT License
def test_update():
    # lower case, tokenized.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    trade_tracker = TRADETracker()
    trade_tracker.init_session()
    trade_tracker.state['history'] = [
        ['null', 'i am trying to find an restaurant in the center'],
        ['the cambridge chop is an good restaurant']
    ]
    from timeit import default_timer as timer
    start = timer()
    pprint(trade_tracker.update('what is the area ?'))
    end = timer()
    print(end - start)

    start = timer()
    pprint(trade_tracker.update('what is the area '))
    end = timer()
    print(end - start) 
Example #24
Source File: v1_validation.py    From Attentive-Filtering-Network with MIT License
def best_eer(true_labels, predictions):

    def f_neg(threshold):
        ## Scipy tries to minimize the function
        return compute_eer(true_labels, predictions >= threshold)

    # Initialization of best threshold search
    thr_0 = [0.20] * 1 # binary class
    constraints = [(0.,1.)] * 1 # binary class
    def bounds(**kwargs):
        x = kwargs["x_new"]
        tmax = bool(np.all(x <= 1))
        tmin = bool(np.all(x >= 0))
        return tmax and tmin

    # Search using L-BFGS-B, the epsilon step must be big otherwise there is no gradient
    minimizer_kwargs = {"method": "L-BFGS-B",
                        "bounds":constraints,
                        "options":{
                            "eps": 0.05
                            }
                       }

    # We combine L-BFGS-B with Basinhopping for stochastic search with random steps
    logger.info("===> Searching optimal threshold for each label")
    start_time = timer()

    opt_output = basinhopping(f_neg, thr_0,
                                stepsize = 0.1,
                                minimizer_kwargs=minimizer_kwargs,
                                niter=10,
                                accept_test=bounds)

    end_time = timer()
    logger.info("===> Optimal threshold for each label:\n{}".format(opt_output.x))
    logger.info("Threshold found in: %s seconds" % (end_time - start_time))

    score = opt_output.fun
    return score, opt_output.x 
Example #25
Source File: steppertest.py    From rpi-film-capture with MIT License
def start_photo(self):
        ttimes=self.triggertimes
        ptimes=self.phototimes
        trig=self.triggertime
        qlen=self.qlen
        headroom=self.smart_headroom/100.0
        start=timer()
        if trig:
            ttimes.appendleft(start-trig)
        self.triggertime = start
        #if we have a full set of intervals, calculate average and adjust motor
        if len(ttimes) == qlen and len(ptimes) == qlen:
            tavg=sum(ttimes)/qlen
            pavg=sum(ptimes)/qlen
            avgGap=tavg-pavg
            lastGap=ttimes[0]-ptimes[0]
            neededGap=tavg*headroom
            diff=avgGap-neededGap
            diffpct=diff/headroom  #this is how far off the headroom we are, as a fraction of that headroom
            logging.debug(str(tavg)+" "+str(pavg)+" "+str(lastGap)+" "+str(diffpct))
            if lastGap<neededGap*.8 or diffpct<-.1:
                logging.debug("Way Fast")
                self.motor_set_speed(self.speed-7)  #if the last frame or avg is way off 
            elif lastGap<neededGap*.9 or diffpct<0:
                logging.debug("Fast")
                self.motor_set_speed(self.speed-1) #if we're just barely under required gap
            elif diffpct>.5:  #if we're well over, speed up aggressively
                logging.debug("Way Slow")
                self.motor_set_speed(self.speed+2)
            elif diffpct>.2: #if we're close, tweak it
                logging.debug("Slow")
                self.motor_set_speed(self.speed+1) 
Example #26
Source File: util.py    From misp42splunk with GNU Lesser General Public License v3.0
def __getitem__(self, key):
        item = dict.__getitem__(self, key)
        item.timestamp = timeit.default_timer()
        return item.value 
Example #27
Source File: prepare_segmented_dataset_swbd.py    From pase with MIT License
def copy_folder(in_folder, out_folder):
    if not os.path.isdir(out_folder):
        print('Replicating dataset structure...')
        beg_t = timer()
        shutil.copytree(in_folder, out_folder, ignore=ig_f)
        end_t = timer()
        print('Replicated structure in {:.1f} s'.format(end_t - beg_t)) 
Example #28
Source File: prepare_segmented_dataset_ami.py    From pase with MIT License
def copy_folder(in_folder, out_folder):
    if not os.path.isdir(out_folder):
        print('Replicating dataset structure...')
        beg_t = timer()
        shutil.copytree(in_folder, out_folder, ignore=ig_f)
        end_t = timer()
        print('Replicated structure in {:.1f} s'.format(end_t - beg_t)) 
Example #29
Source File: knn.py    From pase with MIT License
def main(opts):
    # find npy files in data dir
    with open(opts.data_cfg, 'r') as cfg_f:
        # contains train and test files
        cfg = json.load(cfg_f)
        train_X, train_Y, spk2idx = load_train_files(opts.data_root,
                                                     cfg, 'train')
        test_X, test_Y = load_test_files(opts.data_root, cfg)
        print('Loaded trainX: ', train_X.shape)
        print('Loaded trainY: ', train_Y.shape)
        neigh = KNeighborsClassifier(n_neighbors=opts.k, n_jobs=opts.n_jobs)
        neigh.fit(train_X, train_Y) 
        accs = []
        timings = []
        beg_t = timeit.default_timer()
        for te_idx in range(len(test_X)):
            test_x = test_X[te_idx]
            facc = []
            preds = [0.] * len(spk2idx)
            Y_ = neigh.predict(test_x)
            for ii in range(len(Y_)):
                preds[Y_[ii]] += 1
            y_ = np.argmax(preds, axis=0)
            y = test_Y[te_idx]
            if y_ == y:
                accs.append(1)
            else:
                accs.append(0.)
            end_t = timeit.default_timer()
            timings.append(end_t - beg_t)
            beg_t = timeit.default_timer()
            print('Processing test utterance {}/{}, muttime: {:.3f} s'
                  ''.format(te_idx + 1,
                            len(test_X),
                            np.mean(timings)))
        print('Score on {} samples: {}'.format(len(accs),
                                               np.mean(accs)))
        with open(opts.out_log, 'w') as out_f:
            out_f.write('{:.4f}'.format(float(np.mean(accs)))) 
Example #30
Source File: random_fire_generator.py    From fire with Apache License 2.0
def generate_random_fires(fire_schemas, n=100):
    """
    Given a list of fire product schemas (account, loan, derivative_cash_flow,
    security), generate random data and associated random relations (customer,
    issuer, collateral, etc.)

    TODO: add config to set number of products, min/max for dates etc.

    TODO: add relations
    """
    batches = []
    start_time = timeit.default_timer()

    for fire_schema in fire_schemas:
        with open(fire_schema, "r") as f:
            schema = json.load(f)
        data_type = fire_schema.split("/")[-1].split(".json")[0]
        data = generate_product_fire(schema, data_type, n)
        batches.append(data)

    end_time = timeit.default_timer() - start_time
    logging.warning(
        "Generating FIRE batches and writing to files"
        " took {} seconds".format(end_time)
    )
    # logging.warn(batches)
    return batches