Python time.ctime() Examples

The following code examples show how to use time.ctime(). They are taken from open-source Python projects.
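
Before the project excerpts, a minimal sketch of the function itself may help: time.ctime() with no argument formats the current local time, and with a numeric argument it formats that value as seconds since the epoch. The output strings below are illustrative; the exact text depends on the clock and local timezone.

import time

print(time.ctime())              # current local time, e.g. 'Sun Jun 20 23:21:05 1993'
print(time.ctime(0))             # the epoch: 'Thu Jan  1 00:00:00 1970' on a UTC machine
print(time.ctime(time.time()))   # equivalent to calling time.ctime() with no argument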

Example 1
Project: mycode   Author: gmraabe   File: iss_tracking.py    GNU General Public License v3.0
def isspass(userlat, userlon):
    passiss = 'http://api.open-notify.org/iss-pass.json'
    passiss = passiss + '?lat=' + str(userlat) + '&lon=' + str(userlon)
    response = urllib.request.urlopen(passiss)
    result = json.loads(response.read())
    # print(result) ## uncomment to see the downloaded result

    over = result['response'][0]['risetime']

    style = ('Arial', 6, 'bold')
    mylocation.write(time.ctime(over), font=style)

    print('The next five passes over ' + str(yellowlat) + ' ' + str(yellowlon))
    print('Pass 1 = ' + time.ctime(result['response'][0]['risetime']))
    print('Pass 2 = ' + time.ctime(result['response'][1]['risetime']))
    print('Pass 3 = ' + time.ctime(result['response'][2]['risetime']))
    print('Pass 4 = ' + time.ctime(result['response'][3]['risetime']))
    print('Pass 5 = ' + time.ctime(result['response'][4]['risetime']))



Example 2
Project: pyblish-win   Author: pyblish   File: pstats.py    GNU Lesser General Public License v3.0
def load_stats(self, arg):
        if not arg:  self.stats = {}
        elif isinstance(arg, basestring):
            f = open(arg, 'rb')
            self.stats = marshal.load(f)
            f.close()
            try:
                file_stats = os.stat(arg)
                arg = time.ctime(file_stats.st_mtime) + "    " + arg
            except:  # in case this is not unix
                pass
            self.files = [ arg ]
        elif hasattr(arg, 'create_stats'):
            arg.create_stats()
            self.stats = arg.stats
            arg.stats = {}
        if not self.stats:
            raise TypeError("Cannot create or construct a %r object from %r"
                            % (self.__class__, arg))
        return 
Example 3
Project: pyblish-win   Author: pyblish   File: generator.py    GNU Lesser General Public License v3.0
def flatten(self, msg, unixfrom=False):
        """Print the message object tree rooted at msg to the output file
        specified when the Generator instance was created.

        unixfrom is a flag that forces the printing of a Unix From_ delimiter
        before the first object in the message tree.  If the original message
        has no From_ delimiter, a `standard' one is crafted.  By default, this
        is False to inhibit the printing of any From_ delimiter.

        Note that for subobjects, no From_ line is printed.
        """
        if unixfrom:
            ufrom = msg.get_unixfrom()
            if not ufrom:
                ufrom = 'From nobody ' + time.ctime(time.time())
            print >> self._fp, ufrom
        self._write(msg) 
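
The crafted delimiter above is just the literal 'From nobody ' followed by the current time in ctime() form. A minimal standalone sketch of the same idea (the example output is illustrative):

import time

ufrom = 'From nobody ' + time.ctime(time.time())
print(ufrom)   # e.g. 'From nobody Mon Mar  4 10:15:27 2024'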
Example 4
Project: aospy   Author: spencerahill   File: calc.py    Apache License 2.0
def load(self, dtype_out_time, dtype_out_vert=False, region=False,
             plot_units=False, mask_unphysical=False):
        """Load the data from the object if possible or from disk."""
        msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
               "dtype_out_vert={2}, and region="
               "{3}".format(self, dtype_out_time, dtype_out_vert, region))
        logging.info(msg + ' ({})'.format(ctime()))
        # Grab from the object if its there.
        try:
            data = self.data_out[dtype_out_time]
        except (AttributeError, KeyError):
            # Otherwise get from disk.  Try scratch first, then archive.
            try:
                data = self._load_from_disk(dtype_out_time, dtype_out_vert,
                                            region=region)
            except IOError:
                data = self._load_from_tar(dtype_out_time, dtype_out_vert)
        # Copy the array to self.data_out for ease of future access.
        self._update_data_out(data, dtype_out_time)
        # Apply desired plotting/cleanup methods.
        if mask_unphysical:
            data = self.var.mask_unphysical(data)
        if plot_units:
            data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
        return data 
Example 5
Project: StructEngPy   Author: zhuoju36   File: Logger.py    MIT License
def info(log:str,target='console'):
    """
    log: text to record.
    target: 'console' to print log on screen or file to write in. 
    """
    if target=='console':
        thd=threading.Thread(target=print,args=(ctime(),':',log))
        thd.setDaemon(True)
        thd.start()
        thd.join()
    else:
        try:
            thd=threading.Thread(target=print,args=(ctime(),':',log))
            thd.setDaemon(True)
            thd.start()
            thd.join()
        except Exception as e:
            print(e) 
Example 6
Project: deep-learning-note   Author: wdxtub   File: 3_thread_wayB.py    MIT License
def main():
    print('Program started at', ctime())
    threads = []
    nloops = range(len(loops))

    for i in nloops:
        t = MyThread(loop, (i, loops[i]))
        threads.append(t)

    for i in nloops:
        threads[i].start()

    for i in nloops:
        threads[i].join()  # wait for all threads to finish

    print('Program finished at', ctime())
Example 7
Project: deep-learning-note   Author: wdxtub   File: 2_thread_wayA.py    MIT License
def main():
    print('Program started at', ctime())
    threads = []
    nloops = range(len(loops))

    for i in nloops:
        t = threading.Thread(target=loop, args=(i, loops[i]))
        threads.append(t)

    for i in nloops:
        threads[i].start()

    for i in nloops:
        threads[i].join() # wait for all threads to finish

    print('Program finished at', ctime())
Example 8
Project: deep-learning-note   Author: wdxtub   File: 4_thread_fac_fib.py    MIT License
def main():
    nfuncs = range(len(funcs))
    print('*** Single-threaded')
    for i in nfuncs:
        print('starting', funcs[i].__name__, 'at:', ctime())
        print(funcs[i](n))
        print(funcs[i].__name__, 'finished at:', ctime())

    print('\n*** Multi-threaded')
    threads = []
    for i in nfuncs:
        t = MyThread(funcs[i], (n,), funcs[i].__name__)
        threads.append(t)

    for i in nfuncs:
        threads[i].start()

    for i in nfuncs:
        threads[i].join()
        print(threads[i].getResult())
    print('ALL DONE') 
Example 9
Project: PFMonitor   Author: TomHacker   File: monitor.py    MIT License
def monitor(self):
        current_model_list = os.listdir(self.model_path)
        from pushbullet import PushBullet
        pb = PushBullet(api_key=self.api_key)
        res1 = self.get_model_status(self.model_path, time.ctime())
        res2, status = self.get_process_status(self.pid)
        pb.push_note(title='Neural-network process status monitor made by 李帅', body=res1 + res2)
        while True:
            now_model_list = os.listdir(self.model_path)
            if now_model_list == current_model_list:
                res2, status = self.get_process_status(self.pid)
                if status == 'fail':
                    pb.push_note(title='Neural-network process status monitor made by 李帅', body='The run has finished or crashed, please check the machine!!!!!!!')
                    break
                else:
                    pass
            else:
                res1 = self.get_model_status(self.model_path, time.ctime())
                res2, status = self.get_process_status(self.pid)
                current_model_list = now_model_list
                if status == 'fail':
                    pb.push_note(title='Neural-network process status monitor made by 李帅', body='The run has finished or crashed, please check the machine!!!!!!!')
                    break
                else:
                    pb.push_note(title='Neural-network process status monitor made by 李帅', body=res1 + res2)
Example 10
Project: Repobot   Author: Desgard   File: ultratb.py    MIT License
def prepare_header(self, etype, long_version=False):
        colors = self.Colors  # just a shorthand + quicker name lookup
        colorsnormal = colors.Normal  # used a lot
        exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
        width = min(75, get_terminal_size()[0])
        if long_version:
            # Header with the exception type, python version, and date
            pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
            date = time.ctime(time.time())

            head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal,
                                           exc, ' ' * (width - len(str(etype)) - len(pyver)),
                                           pyver, date.rjust(width) )
            head += "\nA problem occurred executing Python code.  Here is the sequence of function" \
                    "\ncalls leading up to the error, with the most recent (innermost) call last."
        else:
            # Simplified header
            head = '%s%s' % (exc, 'Traceback (most recent call last)'. \
                             rjust(width - len(str(etype))) )

        return head 
Example 11
Project: razzy-spinner   Author: rafasashi   File: chunkparser_app.py    GNU General Public License v3.0
def save_grammar(self, filename=None):
        if not filename:
            ftypes = [('Chunk Gramamr', '.chunk'),
                      ('All files', '*')]
            filename = tkinter.filedialog.asksaveasfilename(filetypes=ftypes,
                                                      defaultextension='.chunk')
            if not filename: return
        if (self._history and self.normalized_grammar ==
            self.normalize_grammar(self._history[-1][0])):
            precision, recall, fscore = ['%.2f%%' % (100*v) for v in
                                         self._history[-1][1:]]
        elif self.chunker is None:
            precision = recall = fscore = 'Grammar not well formed'
        else:
            precision = recall = fscore = 'Not finished evaluation yet'

        with open(filename, 'w') as outfile:
            outfile.write(self.SAVE_GRAMMAR_TEMPLATE % dict(
                date=time.ctime(), devset=self.devset_name,
                precision=precision, recall=recall, fscore=fscore,
                grammar=self.grammar.strip())) 
Example 12
Project: razzy-spinner   Author: rafasashi   File: weka.py    GNU General Public License v3.0
def header_section(self):
        """Returns an ARFF header as a string."""
        # Header comment.
        s = ('% Weka ARFF file\n' +
             '% Generated automatically by NLTK\n' +
             '%% %s\n\n' % time.ctime())

        # Relation name
        s += '@RELATION rel\n\n'

        # Input attribute specifications
        for fname, ftype in self._features:
            s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)

        # Label attribute specification
        s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))

        return s 
Example 13
Project: QCANet   Author: funalab   File: utils.py    MIT License
def createOpbase(self, opbase):
        if (opbase[len(opbase) - 1] == self.psep):
            opbase = opbase[:len(opbase) - 1]
        if not (opbase[0] == self.psep):
            if (opbase.find('./') == -1):
                opbase = './' + opbase
        t = time.ctime().split(' ')
        if t.count('') == 1:
            t.pop(t.index(''))
        opbase = opbase + '_' + t[1] + t[2] + t[0] + '_' + t[4] + '_' + t[3].split(':')[0] + t[3].split(':')[1] + t[3].split(':')[2]
        if not (pt.exists(opbase)):
            os.mkdir(opbase)
            print('Output Directory not exist! Create...')
        print('Output Directory: {}'.format(opbase))
        self.opbase = opbase
        return opbase 
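
The method above assembles a timestamped directory name by splitting the ctime() string (the t.count('') check removes the empty field that appears when the day of month is space-padded). A hypothetical alternative sketch using time.strftime produces roughly the same 'MonDDDow_YYYY_HHMMSS' name without the string surgery; note that %d zero-pads the day where the ctime()-based version does not, and the './output_' prefix is a placeholder:

import os
import time

# hypothetical equivalent of the ctime()-splitting above
stamp = time.strftime('%b%d%a_%Y_%H%M%S')   # e.g. 'Mar04Mon_2024_101527'
opbase = './output_' + stamp
if not os.path.exists(opbase):
    os.mkdir(opbase)
print('Output Directory: {}'.format(opbase))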
Example 14
Project: gusto   Author: firedrakeproject   File: state.py    MIT License
def __init__(self, filename, diagnostics, description, comm, create=True):
        """Create a dump file that stores diagnostics.

        :arg filename: The filename.
        :arg diagnostics: The :class:`Diagnostics` object.
        :arg description: A description.
        :kwarg create: If False, assume that filename already exists
        """
        self.filename = filename
        self.diagnostics = diagnostics
        self.comm = comm
        if not create:
            return
        if self.comm.rank == 0:
            with Dataset(filename, "w") as dataset:
                dataset.description = "Diagnostics data for simulation {desc}".format(desc=description)
                dataset.history = "Created {t}".format(t=time.ctime())
                dataset.source = "Output from Gusto model"
                dataset.createDimension("time", None)
                var = dataset.createVariable("time", np.float64, ("time", ))
                var.units = "seconds"
                for name in diagnostics.fields:
                    group = dataset.createGroup(name)
                    for diagnostic in diagnostics.available_diagnostics:
                        group.createVariable(diagnostic, np.float64, ("time", )) 
Example 15
Project: PythonClassBook   Author: PythonClassRoom   File: fig19_15.py    GNU General Public License v3.0
def run( self ):
      """Vehicle waits unless/until light is green"""

      # stagger arrival times
      time.sleep( random.randrange( 1, 10 ) )

      # prints arrival time of car at intersection
      print "%s arrived at %s" % \
         ( self.getName(), time.ctime( time.time() ) )

      # wait for green light
      self.threadEvent.wait()

      # displays time that car departs intersection
      print "%s passes through intersection at %s" % \
         ( self.getName(), time.ctime( time.time() ) ) 
Example 16
Project: DRCOG_Urbansim   Author: apdjustino   File: logger.py    GNU Affero General Public License v3.0
def start_block(self, name='Unnamed block', verbose=True, tags=[], verbosity_level=3):
        """
        Starts a logger 'block'.  If in verbose mode, prints the current datetime.
        All logger messages until the next call to end_block() will
        be indented to show that they are contained in this block.
        """
        start_memory = self._start_log_memory()
        if self._should_log(tags, verbosity_level):    
            if (verbose):
                self._start_block_msg = name + ": started on " + time.ctime()
            else:
                self._start_block_msg = name
            self._write(self._start_block_msg)
            self._current_level += 1
            self._has_indent = False
            
        start_time = time.time()
        block_stack_item = (name, start_time, start_memory, tags, verbosity_level, self._show_exact_time, self._is_logging_memory)
        self._block_stack.append(block_stack_item)
        return block_stack_item 
Example 17
Project: asn1tools   Author: eerimoq   File: __init__.py    MIT License
def generate(compiled, codec):
    """Generate Rust source code from given compiled specification.

    """

    date = time.ctime()

    if codec == 'uper':
        helpers, types_code = uper.generate(compiled)
    else:
        raise Exception()

    source = SOURCE_FMT.format(version=__version__,
                               date=date,
                               helpers=helpers,
                               types_code=types_code)

    return source 
Example 18
Project: OpenBottle   Author: xiaozhuchacha   File: chunkparser_app.py    MIT License
def save_grammar(self, filename=None):
        if not filename:
            ftypes = [('Chunk Gramamr', '.chunk'),
                      ('All files', '*')]
            filename = tkinter.filedialog.asksaveasfilename(filetypes=ftypes,
                                                      defaultextension='.chunk')
            if not filename: return
        if (self._history and self.normalized_grammar ==
            self.normalize_grammar(self._history[-1][0])):
            precision, recall, fscore = ['%.2f%%' % (100*v) for v in
                                         self._history[-1][1:]]
        elif self.chunker is None:
            precision = recall = fscore = 'Grammar not well formed'
        else:
            precision = recall = fscore = 'Not finished evaluation yet'

        with open(filename, 'w') as outfile:
            outfile.write(self.SAVE_GRAMMAR_TEMPLATE % dict(
                date=time.ctime(), devset=self.devset_name,
                precision=precision, recall=recall, fscore=fscore,
                grammar=self.grammar.strip())) 
Example 19
Project: OpenBottle   Author: xiaozhuchacha   File: weka.py    MIT License
def header_section(self):
        """Returns an ARFF header as a string."""
        # Header comment.
        s = ('% Weka ARFF file\n' +
             '% Generated automatically by NLTK\n' +
             '%% %s\n\n' % time.ctime())

        # Relation name
        s += '@RELATION rel\n\n'

        # Input attribute specifications
        for fname, ftype in self._features:
            s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)

        # Label attribute specification
        s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))

        return s 
Example 20
Project: OpenBottle   Author: xiaozhuchacha   File: chunkparser_app.py    MIT License
def save_grammar(self, filename=None):
        if not filename:
            ftypes = [('Chunk Gramamr', '.chunk'),
                      ('All files', '*')]
            filename = tkinter.filedialog.asksaveasfilename(filetypes=ftypes,
                                                      defaultextension='.chunk')
            if not filename: return
        if (self._history and self.normalized_grammar ==
            self.normalize_grammar(self._history[-1][0])):
            precision, recall, fscore = ['%.2f%%' % (100*v) for v in
                                         self._history[-1][1:]]
        elif self.chunker is None:
            precision = recall = fscore = 'Grammar not well formed'
        else:
            precision = recall = fscore = 'Not finished evaluation yet'

        with open(filename, 'w') as outfile:
            outfile.write(self.SAVE_GRAMMAR_TEMPLATE % dict(
                date=time.ctime(), devset=self.devset_name,
                precision=precision, recall=recall, fscore=fscore,
                grammar=self.grammar.strip())) 
Example 21
Project: pyblish-win   Author: pyblish   File: diff.py    GNU Lesser General Public License v3.0
def main():

    usage = "usage: %prog [options] fromfile tofile"
    parser = optparse.OptionParser(usage)
    parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
    parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
    parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
    parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
    parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
    (options, args) = parser.parse_args()

    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    if len(args) != 2:
        parser.error("need to specify both a fromfile and tofile")

    n = options.lines
    fromfile, tofile = args

    fromdate = time.ctime(os.stat(fromfile).st_mtime)
    todate = time.ctime(os.stat(tofile).st_mtime)
    fromlines = open(fromfile, 'U').readlines()
    tolines = open(tofile, 'U').readlines()

    if options.u:
        diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
    elif options.n:
        diff = difflib.ndiff(fromlines, tolines)
    elif options.m:
        diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
    else:
        diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)

    sys.stdout.writelines(diff) 
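
Examples 2 and 21 both use the same pattern for stamping a file with its modification time: time.ctime(os.stat(path).st_mtime). A minimal self-contained sketch of that pattern (the file name here is hypothetical):

import os
import time

path = 'example.txt'                      # hypothetical file
mtime = os.stat(path).st_mtime            # modification time in seconds since the epoch
print(path + ' last modified on ' + time.ctime(mtime))
# os.path.getmtime(path) returns the same st_mtime value (compare Example 51 below)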
Example 22
Project: pyblish-win   Author: pyblish   File: urllib2.py    GNU Lesser General Public License v3.0
def get_cnonce(self, nonce):
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
                                            randombytes(8))).hexdigest()
        return dig[:16] 
Example 23
Project: pyblish-win   Author: pyblish   File: test_datetime.py    GNU Lesser General Public License v3.0
def test_ctime(self):
        t = self.theclass(2002, 3, 2)
        self.assertEqual(t.ctime(), "Sat Mar  2 00:00:00 2002") 
Example 24
Project: pyblish-win   Author: pyblish   File: test_datetime.py    GNU Lesser General Public License v3.0
def test_more_ctime(self):
        # Test fields that TestDate doesn't touch.
        import time

        t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
        self.assertEqual(t.ctime(), "Sat Mar  2 18:03:05 2002")
        # Oops!  The next line fails on Win2K under MSVC 6, so it's commented
        # out.  The difference is that t.ctime() produces " 2" for the day,
        # but platform ctime() produces "02" for the day.  According to
        # C99, t.ctime() is correct here.
        # self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))

        # So test a case where that difference doesn't matter.
        t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
        self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple()))) 
Example 25
Project: pyblish-win   Author: pyblish   File: test_time.py    GNU Lesser General Public License v3.0
def test_conversions(self):
        self.assertTrue(time.ctime(self.t)
                     == time.asctime(time.localtime(self.t)))
        self.assertTrue(long(time.mktime(time.localtime(self.t)))
                     == long(self.t)) 
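
The assertions above exercise the documented relationships among the time functions: ctime(t) is shorthand for asctime(localtime(t)), and mktime() inverts localtime() to whole seconds. The same check in Python 3 syntax, as a sketch (it assumes the local timezone maps the instant unambiguously):

import time

t = time.time()
assert time.ctime(t) == time.asctime(time.localtime(t))
assert int(time.mktime(time.localtime(t))) == int(t)   # round trip, truncated to whole seconds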
Example 26
Project: pyblish-win   Author: pyblish   File: test_time.py    GNU Lesser General Public License v3.0
def test_insane_timestamps(self):
        # It's possible that some platform maps time_t to double,
        # and that this test will fail there.  This test should
        # exempt such platforms (provided they return reasonable
        # results!).
        for func in time.ctime, time.gmtime, time.localtime:
            for unreasonable in -1e200, 1e200:
                self.assertRaises(ValueError, func, unreasonable) 
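
This is a Python 2 test, where out-of-range values raise ValueError; on Python 3 the same calls typically raise OverflowError (or OSError for values the platform C library rejects), so portable callers catch several types. A hedged sketch:

import time

for unreasonable in (-1e200, 1e200):
    try:
        print(time.ctime(unreasonable))
    except (ValueError, OverflowError, OSError) as exc:
        print('rejected {!r}: {}'.format(unreasonable, type(exc).__name__))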
Example 27
Project: lora-sx1276   Author: raspberrypi-tw   File: gw_tx.py    GNU Affero General Public License v3.0
def on_rx_done(self):
        print("\nRxDone")

        payload = self.read_payload(nocheck=True)
        data = ''.join([chr(c) for c in payload])
        print("Time: {}".format(str(time.ctime())))
        print("Raw RX: {}".format(data))

        try:
            _length, _data = packer.Unpack_Str(data)
            print("Time: {}".format( str(time.ctime() )))
            print("Length: {}".format( _length ))
            print("Receive: {}".format( _data ))
        except:
            print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
            print("Non-hexadecimal digit found...")
            print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
            print("Receive: {}".format( data))


        # set TX
        self.rx_done = True
        # comment it will receive countinous
        self.set_dio_mapping([1,0,0,0,0,0])    # TX
        self.set_mode(MODE.STDBY)
        self.clear_irq_flags(TxDone=1) 
Example 28
Project: aospy   Author: spencerahill   File: calc.py    Apache License 2.0
def _print_verbose(*args):
        """Print diagnostic message."""
        try:
            return '{0} {1} ({2})'.format(args[0], args[1], ctime())
        except IndexError:
            return '{0} ({1})'.format(args[0], ctime()) 
Example 29
Project: sump2   Author: blackmesalabs   File: sump2.py    GNU General Public License v3.0
def build_header( self, module_name ):
    import time;
    now = time.time();
    means = time.ctime( now );
    rts  = [];
    rts += [ "$date " + means +                  " $end" ];
#   rts += [ "$date      Wed May  4 12:00:00 2018  $end" ];
#   rts += [ "$version   ModelSim Version 6.0c     $end" ];
    rts += [ "$version   sump2.py by BlackMesaLabs $end" ];
    rts += [ "$timescale 1ps                       $end" ];
#   rts += [ "$scope     module   module_name     $end" ];
    rts += [ "$scope     module " + module_name + " $end" ];
    return rts; 
Example 30
Project: deep-learning-note   Author: wdxtub   File: 8_prodcons.py    MIT License
def _atexit():
    print("all done at:", ctime()) 
Example 31
Project: deep-learning-note   Author: wdxtub   File: 3_thread_wayB.py    MIT License
def loop(nloop, nsec):
    print('Start LOOP', nloop, 'at:', ctime())
    sleep(nsec)
    print('LOOP', nloop, 'DONE at:', ctime()) 
Example 32
Project: deep-learning-note   Author: wdxtub   File: myThread.py    MIT License
def run(self):
        print('starting', self.name, 'at:', ctime())
        self.res = self.func(*self.args)
        print(self.name, 'finished at:', ctime()) 
Example 33
Project: deep-learning-note   Author: wdxtub   File: 5_bookrank.py    MIT License
def main():
    print('At {} on Amazon...'.format(ctime()))
    for isbn in ISBNs:
        # use multi thread
        Thread(target=_showRanking, args=(isbn,)).start() 
Example 34
Project: deep-learning-note   Author: wdxtub   File: 6_thread_wayC.py    MIT License
def loop(nsec):
    myname = currentThread().name
    with lock:
        remaining.add(myname)
        print("[{}] Started {}".format(ctime(), myname))
    sleep(nsec)
    with lock:
        remaining.remove(myname)
        print("[{}] Completed {} ({} secs)".format(ctime(), myname, nsec))
        print("    (remaining: {})".format(remaining or "None")) 
Example 35
Project: deep-learning-note   Author: wdxtub   File: 6_thread_wayC.py    MIT License
def _atexit():
    print("all Done at", ctime()) 
Example 36
Project: deep-learning-note   Author: wdxtub   File: 1_one_thread.py    MIT License
def loop0():
    print('Loop 0 started at', ctime())
    sleep(4)
    print('Loop 0 finished at', ctime())
Example 37
Project: deep-learning-note   Author: wdxtub   File: 1_one_thread.py    MIT License
def loop1():
    print('Loop 1 started at', ctime())
    sleep(2)
    print('Loop 1 finished at', ctime())
Example 38
Project: deep-learning-note   Author: wdxtub   File: 1_one_thread.py    MIT License
def main():
    print('Program started at', ctime())
    loop0()
    loop1()
    print('Program finished at', ctime())
Example 39
Project: deep-learning-note   Author: wdxtub   File: 7_candy.py    MIT License
def _main():
    print("starging at:", ctime())
    nloops = randrange(2, 6)
    print("THE CANDY MACHINE (full with %d bars)!" % MAX)
    Thread(
        target=consumer, args=(randrange(nloops, nloops + MAX + 2),)
    ).start()  # buyer
    Thread(target=producer, args=(nloops,)).start()  # vndr 
Example 40
Project: deep-learning-note   Author: wdxtub   File: 7_candy.py    MIT License
def _atexit():
    print("all done at:", ctime()) 
Example 41
Project: deep-learning-note   Author: wdxtub   File: 7_tsTservTW.py    MIT License
def dataReceived(self, data):
        message = '[%s] %s' % (ctime(), data)
        self.transport.write(message.encode()) 
Example 42
Project: autocirc   Author: cherveny   File: autocirc.py    MIT License
def errorLogger(severity,textToLog):
	# allow logging of our errors, usually called in except blocks
	print severity,": ",textToLog
	f = open(errorLog,"ab")
	f.write(time.ctime() + severity +  ": " + textToLog + "\n")
	f.close()

##########Report and Email Functions 
Example 43
Project: tmanager   Author: ssh3ll   File: dates.py    MIT License
def time_to_ctime(posix_time: float) -> str:
    """
    Transform seconds since epoch time into human readable time.

    :param posix_time: seconds since epoch
    :return: human readable time
    """
    return time.ctime(posix_time) 
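
Since the wrapper simply forwards to time.ctime(), its behaviour can be illustrated directly; the outputs below assume a machine whose local timezone is UTC:

import time

print(time.ctime(0.0))            # 'Thu Jan  1 00:00:00 1970'
print(time.ctime(1000000000.0))   # 'Sun Sep  9 01:46:40 2001'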
Example 44
Project: ProceduralSettlementsInMinecraft   Author: abrightmoore   File: STARTPROC_v1.py    GNU General Public License v3.0
def log(message):
	print "STARTPROC",time.ctime(),message

# Block 
Example 45
Project: istihzapython   Author: ozgurturkiye   File: animal.py    GNU General Public License v3.0
def add_record(self, name, gender):
        #Open and connect database
        with sqlite3.connect("ciftligim.sqlite") as vt:
            im = vt.cursor()

            animal_id = "---"
            registered = str(time.ctime())
            #We created a tuple to add multiple data
            all_data = (animal_id, name, gender, registered)
            im.execute("""INSERT INTO animals VALUES (?, ?, ?, ?)""", all_data)

            #Can't do it without a commit :)
            vt.commit() 
Example 46
Project: local-info   Author: ianmiell   File: wsgi.py    MIT License
def print_data(data):
	for item in data:
		row = ''
		for field in item:
			if field == 'leaving':
				row += 'at: ' + time.ctime(float(item[field])) + ','
			else:
				row += field + ': ' + item[field] + ', '
		print row 
Example 47
Project: Lofter-image-Crawler   Author: sparrow629   File: LofterCrawler.py    MIT License
def BlogDownload(URL):
	Task = []
	global pagelists
	pagelists = [URL]

	nexturl = getnexturl(URL)
	while nexturl:
		pagelists.append(nexturl)
		print nexturl, "Adding page %s" % len(pagelists)
		nexturl = getnexturl(nexturl)
	print pagelists

	pagenumber = len(pagelists)

	for page in range(0,pagenumber):
		url = pagelists[page]
		html = getHtml(url)
		print "Downloading Page %s" % (page + 1)

		PostUrllist = getPost(html)

		task = ThreadTask(PostUrllist)
		Task.append(task)
		print '-'*16,"\nThis is thread %s \n "% page,'-'*16

	for task in Task:
		task.setDaemon(True)
		task.start()
		print(time.ctime(),'thread %s start' % task)
	for task in Task:
		task.join()
	while 1:
		for task in Task:
			if task.is_alive():
				continue
			else:
				Task.remove(task)
				print(time.ctime(),'thread %s is finished' % task)
		if len(Task) == 0:
			break 
Example 48
Project: pyhanlp   Author: hankcs   File: test_multithread.py    Apache License 2.0
def run(self):
        print("Starting " + self.thread_name)
        while self.counter:
            time.sleep(1)
            sentence = self.analyzer.analyze("商品和服务")  # segment the Chinese phrase "goods and services"
            print("%s: %s, seg: %s" % (self.thread_name, time.ctime(time.time()), sentence))
            self.counter -= 1 
Example 49
Project: RaiBlocks-Live-TPS   Author: BrianPugh   File: live_tps.py    MIT License
def main():
    args, dargs = parse_args()

    print('RaiBlocks TPS Counter')
    server_address = (args.address, args.port)
    block_listener = HTTPServer(server_address, BlockCounterHandler)
    Thread(target=block_listener.serve_forever).start()

    previous_count_pre = 0
    previous_count_post = 0
    while True:
        time.sleep(args.period)

        # Compute TPS
        previous_count_pre = transaction_count
        n_trans_since_prev = transaction_count - previous_count_post
        tps = n_trans_since_prev / args.period
        previous_count_post = previous_count_pre

        if args.human:
            current_time = time.ctime()
        else:
            current_time = "%.4f" % time.time()

        print("Time: %20s    Total: %7d    TPS: %.2f" %
                (current_time, transaction_count, tps)) 
Example 50
Project: torchtrainers   Author: NVlabs   File: trainer.py    BSD 3-Clause "New" or "Revised" License
def get_info():
    import platform
    import getpass
    node = str(platform.node())
    user = getpass.getuser()
    now = time.ctime()
    return "{} {} {}".format(now, node, user) 
Example 51
Project: kat   Author: TOT0RoKR   File: file.py    MIT License
def get_modified_time(self, rootdir):
        return time.ctime(os.path.getmtime(rootdir + self.path)) 
Example 52
Project: kat   Author: TOT0RoKR   File: controller.py    MIT License
def initialize_database(katconfig):
    kernel_root_dir = vim.vars['KATRootDir'].decode()
    database = kernel_root_dir + '/' + "kat.database"
    katref = kernel_root_dir + '/' + "kat.ref"
    pp_tags = []

    i = 0
    files_nr = len(katconfig['files'])
    for it in katconfig['files']:
        i += 1
        print(str(i) + "/" + str(files_nr) + " - " + it.path)
        filename = kernel_root_dir + '/' + it.path
        with open(filename, "r", encoding="utf-8") as f:
            try:
                raw_data = f.read()
            except UnicodeDecodeError:
                with open(filename, "r", encoding="iso-8859-1") as f2:
                    raw_data = f2.read()
        tokens = pps.scan(raw_data)
        it.scope = Scope(it.path, None, 0, 0)
        tags, _, _ = ppp.parse(tokens, it)
        pp_tags += tags

    database = kernel_root_dir + '/' + "kat.database"
    with open(database, "wb") as f:
        pickle.dump(time.ctime(os.path.getmtime(katref)), f)
        pickle.dump(pp_tags, f)

    return pp_tags 
Example 53
Project: endpoints-tools   Author: cloudendpoints   File: call_service_control.py    Apache License 2.0
def call_report(access_token, operation_id, args):
  headers = {"Authorization": "Bearer {}".format(access_token),
             "X-Cloud-Trace-Context": "{};o=1".format(operation_id),
             "Content-Type": "application/json"}
  url = "https://servicecontrol.googleapis.com/v1/services/{}:report".format(args.service_name)
  data_obj = {"service_name": args.service_name,
          "operations": [{
              "operation_id": operation_id,
              "operation_name": "/echo",
              "consumer_id": "api_key:{}".format(args.api_key),
              "start_time": {
                "seconds": int(time.time())
              },
              "end_time": {
                "seconds": int(time.time())
              }
           }]
         }
  data = json.dumps(data_obj)
  t0 = time.time()
  try:
    request = urllib2.Request(url, data, headers)
    response = urllib2.urlopen(request)
    trace_id = response.info().getheader("X-GOOG-TRACE-ID")
#    print "response: {}".format(response.info())
  except urllib2.HTTPError as e:
    print "{} Check failed code: {},  error {}".format(time.ctime(), e.code, e.reason)
    return
  latency = time.time() - t0
  if trace_id and (latency >= 15.0):
    print "{}: report big latency {}, trace_id: {} operation_id: {}".format(time.ctime(), latency, trace_id, operation_id) 
Example 54
Project: endpoints-tools   Author: cloudendpoints   File: call_service_control.py    Apache License 2.0
def call_check(access_token, operation_id, args):
  headers = {"Authorization": "Bearer {}".format(access_token),
             "X-Cloud-Trace-Context": "{};o=1".format(operation_id),
             "Content-Type": "application/json"}
  url = "https://servicecontrol.googleapis.com/v1/services/{}:check".format(args.service_name)
  data_obj = {"service_name": args.service_name,
          "operation": {
              "operation_id": operation_id,
              "operation_name": "/echo",
              "consumer_id": "api_key:{}".format(args.api_key),
              "start_time": {
                "seconds": int(time.time())
              }
           }
         }
  data = json.dumps(data_obj)
  t0 = time.time()
  try:
    request = urllib2.Request(url, data, headers)
    response = urllib2.urlopen(request)
    trace_id = response.info().getheader("X-GOOG-TRACE-ID")
#    print "response: {}".format(response.info())
  except urllib2.HTTPError as e:
    print "{} Check failed code: {},  error {}".format(time.ctime(), e.code, e.reason)
    return
  latency = time.time() - t0
  if trace_id and (latency >= 5.0):
    print "{}: check big latency {}, trace_id: {} operation_id: {}".format(time.ctime(), latency, trace_id, operation_id) 
Example 55
Project: Repobot   Author: Desgard   File: request.py    MIT License
def get_cnonce(self, nonce):
        # The cnonce-value is an opaque
        # quoted string value provided by the client and used by both client
        # and server to avoid chosen plaintext attacks, to provide mutual
        # authentication, and to provide some message integrity protection.
        # This isn't a fabulous effort, but it's probably Good Enough.
        s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime())
        b = s.encode("ascii") + _randombytes(8)
        dig = hashlib.sha1(b).hexdigest()
        return dig[:16] 
Example 56
Project: AIGame   Author: chenghongkuan   File: receiveThread.py    GNU General Public License v3.0
def print_time(threadName, delay, counter):
    while counter:
        if exitFlag:
            (threading.Thread).exit()
        time.sleep(delay)
        print("%s: %s" % (threadName, time.ctime(time.time())))
        counter -= 1 
Example 57
Project: razzy-spinner   Author: rafasashi   File: chunkparser_app.py    GNU General Public License v3.0
def save_history(self, filename=None):
        if not filename:
            ftypes = [('Chunk Gramamr History', '.txt'),
                      ('All files', '*')]
            filename = tkinter.filedialog.asksaveasfilename(filetypes=ftypes,
                                                      defaultextension='.txt')
            if not filename: return

        with open(filename, 'w') as outfile:
            outfile.write('# Regexp Chunk Parsing Grammar History\n')
            outfile.write('# Saved %s\n' % time.ctime())
            outfile.write('# Development set: %s\n' % self.devset_name)
            for i, (g, p, r, f) in enumerate(self._history):
                hdr = ('Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, '
                       'fscore=%.2f%%)' % (i+1, len(self._history),
                                           p*100, r*100, f*100))
                outfile.write('\n%s\n' % hdr)
                outfile.write(''.join('  %s\n' % line for line in g.strip().split()))

            if not (self._history and self.normalized_grammar ==
                    self.normalize_grammar(self._history[-1][0])):
                if self.chunker is None:
                    outfile.write('\nCurrent Grammar (not well-formed)\n')
                else:
                    outfile.write('\nCurrent Grammar (not evaluated)\n')
                outfile.write(''.join('  %s\n' % line for line
                                  in self.grammar.strip().split())) 
Example 58
Project: razzy-spinner   Author: rafasashi   File: downloader.py    GNU General Public License v3.0
def _log(self, msg):
        self._log_messages.append('%s %s%s' % (time.ctime(),
                                     ' | '*self._log_indent, msg))

    #/////////////////////////////////////////////////////////////////
    # Internals
    #///////////////////////////////////////////////////////////////// 
Example 59
Project: me-ica   Author: ME-ICA   File: io.py    GNU Lesser General Public License v2.1
def write_geometry(filepath, coords, faces, create_stamp=None):
    """Write a triangular format Freesurfer surface mesh.

    Parameters
    ----------
    filepath : str
        Path to surface file
    coords : numpy array
        nvtx x 3 array of vertex (x, y, z) coordinates
    faces : numpy array
        nfaces x 3 array of defining mesh triangles
    create_stamp : str
        User/time stamp (default: "created by <user> on <ctime>")
    """
    magic_bytes = np.array([255, 255, 254], dtype=np.uint8)

    if create_stamp is None:
        create_stamp = "created by %s on %s" % (getpass.getuser(),
            time.ctime())

    with open(filepath, 'wb') as fobj:
        magic_bytes.tofile(fobj)
        fobj.write("%s\n\n" % create_stamp)

        np.array([coords.shape[0], faces.shape[0]], dtype='>i4').tofile(fobj)

        # Coerce types, just to be safe
        coords.astype('>f4').reshape(-1).tofile(fobj)
        faces.astype('>i4').reshape(-1).tofile(fobj) 
Example 60
Project: me-ica   Author: ME-ICA   File: test_io.py    GNU Lesser General Public License v2.1
def test_geometry():
    """Test IO of .surf"""
    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "inflated"))
    coords, faces = read_geometry(surf_path)
    assert_equal(0, faces.min())
    assert_equal(coords.shape[0], faces.max() + 1)

    # Test quad with sphere
    surf_path = pjoin(data_path, "surf", "%s.%s" % ("lh", "sphere"))
    coords, faces = read_geometry(surf_path)
    assert_equal(0, faces.min())
    assert_equal(coords.shape[0], faces.max() + 1)

    # Test equivalence of freesurfer- and nibabel-generated triangular files
    # with respect to read_geometry()
    with InTemporaryDirectory():
        surf_path = 'test'
        create_stamp = "created by %s on %s" % (getpass.getuser(),
            time.ctime())
        write_geometry(surf_path, coords, faces, create_stamp)

        coords2, faces2 = read_geometry(surf_path)

        with open(surf_path, 'rb') as fobj:
            magic = np.fromfile(fobj, ">u1", 3)
            read_create_stamp = fobj.readline().rstrip('\n')

    assert_equal(create_stamp, read_create_stamp)

    np.testing.assert_array_equal(coords, coords2)
    np.testing.assert_array_equal(faces, faces2)

    # Validate byte ordering
    coords_swapped = coords.byteswap().newbyteorder()
    faces_swapped = faces.byteswap().newbyteorder()
    np.testing.assert_array_equal(coords_swapped, coords)
    np.testing.assert_array_equal(faces_swapped, faces) 
Example 61
Project: proxy_pool   Author: guoxianru   File: ProxyRefreshSchedule.py    Apache License 2.0
def validProxy(self):
        """
        Validate the proxies in raw_proxy_queue and move the usable ones into useful_proxy_queue.
        :return:
        """
        self.db.changeTable(self.raw_proxy_queue)
        raw_proxy_item = self.db.pop()
        self.log.info('ProxyRefreshSchedule: %s start validProxy' % time.ctime())
        # compute the remaining proxies once, to avoid duplicate validation
        remaining_proxies = self.getAll()
        while raw_proxy_item:
            raw_proxy = raw_proxy_item.get('proxy')
            if isinstance(raw_proxy, bytes):
                # Py3 compatibility
                raw_proxy = raw_proxy.decode('utf8')

            if (raw_proxy not in remaining_proxies) and validUsefulProxy(raw_proxy):
                self.db.changeTable(self.useful_proxy_queue)
                self.db.put(raw_proxy)
                self.log.info('ProxyRefreshSchedule: %s validation pass' % raw_proxy)
            else:
                self.log.info('ProxyRefreshSchedule: %s validation fail' % raw_proxy)
            self.db.changeTable(self.raw_proxy_queue)
            raw_proxy_item = self.db.pop()
            remaining_proxies = self.getAll()
        self.log.info('ProxyRefreshSchedule: %s validProxy complete' % time.ctime()) 
Example 62
Project: misp42splunk   Author: remg427   File: __init__.py    GNU Lesser General Public License v3.0
def _cnonce():
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
    return dig[:16] 
Example 63
Project: gftools   Author: googlefonts   File: gftools-build-vf.py    Apache License 2.0
def intro():
    """
    Gives basic script info.
    """
    printG("#    # #####        #####    ################")
    printG("#    # #            #   #    #   ##         #")
    printG(" #  #  ####          #   #  #   # #   #######")
    printG(" #  #  #     <---->  #    ##    # #      #")
    printG("  ##   #              #        #  #   ####")
    printG("  ##   #              ##########  #####")
    print("\n**** Starting variable font build script:")
    print("     [+]", time.ctime())
    printG("    [!] Done") 
Example 64
Project: HGCAL_TB2017_WGAN   Author: ThorbenQuast   File: tf_helpers.py    MIT License
def save(saver, sess, checkpoint_dir, step):
  model_name = "WGAN.model"
  if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)

  pp.pprint("[%s] saving the parameters after %s steps to %s..." % (time.ctime(), step, checkpoint_dir))
  saver.save(sess,
    os.path.join(checkpoint_dir, model_name),
    global_step=step) 
Example 65
Project: OpenBottle   Author: xiaozhuchacha   File: chunkparser_app.py    MIT License
def save_history(self, filename=None):
        if not filename:
            ftypes = [('Chunk Gramamr History', '.txt'),
                      ('All files', '*')]
            filename = tkinter.filedialog.asksaveasfilename(filetypes=ftypes,
                                                      defaultextension='.txt')
            if not filename: return

        with open(filename, 'w') as outfile:
            outfile.write('# Regexp Chunk Parsing Grammar History\n')
            outfile.write('# Saved %s\n' % time.ctime())
            outfile.write('# Development set: %s\n' % self.devset_name)
            for i, (g, p, r, f) in enumerate(self._history):
                hdr = ('Grammar %d/%d (precision=%.2f%%, recall=%.2f%%, '
                       'fscore=%.2f%%)' % (i+1, len(self._history),
                                           p*100, r*100, f*100))
                outfile.write('\n%s\n' % hdr)
                outfile.write(''.join('  %s\n' % line for line in g.strip().split()))

            if not (self._history and self.normalized_grammar ==
                    self.normalize_grammar(self._history[-1][0])):
                if self.chunker is None:
                    outfile.write('\nCurrent Grammar (not well-formed)\n')
                else:
                    outfile.write('\nCurrent Grammar (not evaluated)\n')
                outfile.write(''.join('  %s\n' % line for line
                                  in self.grammar.strip().split())) 
Example 66
Project: OpenBottle   Author: xiaozhuchacha   File: downloader.py    MIT License
def _log(self, msg):
        self._log_messages.append('%s %s%s' % (time.ctime(),
                                     ' | '*self._log_indent, msg))

    #/////////////////////////////////////////////////////////////////
    # Internals
    #///////////////////////////////////////////////////////////////// 
Example 67
Project: OpenBottle   Author: xiaozhuchacha   File: downloader.py    MIT License
def _log(self, msg):
        self._log_messages.append('%s %s%s' % (time.ctime(),
                                     ' | '*self._log_indent, msg))

    #/////////////////////////////////////////////////////////////////
    # Internals
    #///////////////////////////////////////////////////////////////// 
Example 68
Project: pyblish-win   Author: pyblish   File: bdist_wininst.py    GNU Lesser General Public License v3.0
def get_inidata (self):
        # Return data describing the installation.

        lines = []
        metadata = self.distribution.metadata

        # Write the [metadata] section.
        lines.append("[metadata]")

        # 'info' will be displayed in the installer's dialog box,
        # describing the items to be installed.
        info = (metadata.long_description or '') + '\n'

        # Escape newline characters
        def escape(s):
            return string.replace(s, "\n", "\\n")

        for name in ["author", "author_email", "description", "maintainer",
                     "maintainer_email", "name", "url", "version"]:
            data = getattr(metadata, name, "")
            if data:
                info = info + ("\n    %s: %s" % \
                               (string.capitalize(name), escape(data)))
                lines.append("%s=%s" % (name, escape(data)))

        # The [setup] section contains entries controlling
        # the installer runtime.
        lines.append("\n[Setup]")
        if self.install_script:
            lines.append("install_script=%s" % self.install_script)
        lines.append("info=%s" % escape(info))
        lines.append("target_compile=%d" % (not self.no_target_compile))
        lines.append("target_optimize=%d" % (not self.no_target_optimize))
        if self.target_version:
            lines.append("target_version=%s" % self.target_version)
        if self.user_access_control:
            lines.append("user_access_control=%s" % self.user_access_control)

        title = self.title or self.distribution.get_fullname()
        lines.append("title=%s" % escape(title))
        import time
        import distutils
        build_info = "Built %s with distutils-%s" % \
                     (time.ctime(time.time()), distutils.__version__)
        lines.append("build_info=%s" % build_info)
        return string.join(lines, "\n")

    # get_inidata() 
Example 69
Project: pyblish-win   Author: pyblish   File: test_strftime.py    GNU Lesser General Public License v3.0
def strftest1(self, now):
        if test_support.verbose:
            print "strftime test for", time.ctime(now)
        now = self.now
        # Make sure any characters that could be taken as regex syntax is
        # escaped in escapestr()
        expectations = (
            ('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
            ('%A', calendar.day_name[now[6]], 'full weekday name'),
            ('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
            ('%B', calendar.month_name[now[1]], 'full month name'),
            # %c see below
            ('%d', '%02d' % now[2], 'day of month as number (00-31)'),
            ('%H', '%02d' % now[3], 'hour (00-23)'),
            ('%I', '%02d' % self.clock12, 'hour (01-12)'),
            ('%j', '%03d' % now[7], 'julian day (001-366)'),
            ('%m', '%02d' % now[1], 'month as number (01-12)'),
            ('%M', '%02d' % now[4], 'minute, (00-59)'),
            ('%p', self.ampm, 'AM or PM as appropriate'),
            ('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
            ('%U', '%02d' % ((now[7] + self.jan1[6])//7),
             'week number of the year (Sun 1st)'),
            ('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
            ('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
            'week number of the year (Mon 1st)'),
            # %x see below
            ('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
            ('%y', '%02d' % (now[0]%100), 'year without century'),
            ('%Y', '%d' % now[0], 'year with century'),
            # %Z see below
            ('%%', '%', 'single percent sign'),
        )

        for e in expectations:
            # musn't raise a value error
            try:
                result = time.strftime(e[0], now)
            except ValueError, error:
                self.fail("strftime '%s' format gave error: %s" % (e[0], error))
            if re.match(escapestr(e[1], self.ampm), result):
                continue
            if not result or result[0] == '%':
                self.fail("strftime does not support standard '%s' format (%s)"
                          % (e[0], e[2]))
            else:
                self.fail("Conflict for %s (%s): expected %s, but got %s"
                          % (e[0], e[2], e[1], result)) 
Example 70
Project: lora-sx1276   Author: raspberrypi-tw   File: gw_rx.py    GNU Affero General Public License v3.0
def on_rx_done(self):
        print("\nRxDone")
        print('----------------------------------')

        payload = self.read_payload(nocheck=True)
        data = ''.join([chr(c) for c in payload])

        try:
            _length, _data = packer.Unpack_Str(data)
            print("Time: {}".format( str(time.ctime() )))
            print("Length: {}".format( _length ))
            print("Raw RX: {}".format( payload ))

            try:
                # python3 unicode
                print("Receive: {}".format( _data.encode('latin-1').decode('unicode_escape')))
            except:
                # python2
                print("Receive: {}".format( _data ))
        except:
            print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
            print("Non-hexadecimal digit found...")
            print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
            print("Receive: {}".format( data))



        self.set_dio_mapping([1,0,0,0,0,0])    # TX
        self.set_mode(MODE.STDBY)
  
        sleep(1)
        self.clear_irq_flags(TxDone=1)
        data = {"id":self._id, "data":packer.ACK}
        _length, _ack = packer.Pack_Str( json.dumps(data) )

        try:
            # for python2
            ack = [int(hex(ord(c)), 0) for c in _ack]
        except:
            # for python3 
            ack = [int(hex(c), 0) for c in _ack]

        print("ACK: {}, {}".format( self._id, ack))
        self.write_payload(ack)    
        self.set_mode(MODE.TX) 
Example 71
Project: aospy   Author: spencerahill   File: proj.py    Apache License 2.0
def __init__(self, name, description=None, models=None,
                 default_models=None, regions=None, direc_out='',
                 tar_direc_out=''):
        """
        Parameters
        ----------
        name : str
            The project's name.  This should be unique from that of any other
            `Proj` objects being used.
        description : str, optional
            A description of the model.  This is not used internally by
            aospy; it is solely for the user's information.
        regions : {None, sequence of aospy.Region objects}, optional
            The desired regions over which to perform region-average
            calculations.
        models : {None, sequence of aospy.Model objects}, optional
            The child Model objects of this project.
        default_models : {None, sequence of aospy.Run objects}, optional
            The subset of this Model's runs over which to perform calculations
            by default.
        direc_out, tar_direc_out : str
            Path to the root directories of where, respectively, regular output
            and a .tar-version of the output will be saved to disk.

        Notes
        -----
        Instantiating a `Proj` object has the side-effect of setting the `proj`
        attribute of each of it's child `Model` objects to itself.

        See Also
        --------
        aospy.Model, aospy.Region, aospy.Run

        """
        logging.debug("Initializing Project instance: %s (%s)"
                      % (name, time.ctime()))
        self.name = name
        self.description = '' if description is None else description
        self.direc_out = direc_out
        self.tar_direc_out = tar_direc_out

        if models is None:
            self.models = []
        else:
            self.models = models
        if default_models is None:
            self.default_models = []
        else:
            self.default_models = default_models
        if regions is None:
            self.regions = []
        else:
            self.regions = regions

        # Set the `proj` attribute of the children models.
        for model in self.models:
            setattr(model, 'proj', self) 
Example 72
Project: zabbix   Author: xiaomatech   File: eth_info.py    MIT License
def zabbix_send(conf_dict):

    net_info_dict = get_net_info(conf_dict)
    eth_info_dict = get_eth_info(conf_dict)

    json_output_file = conf_dict["network"]["json_output_file"]
    zabbix_send_file = conf_dict["common"]["zabbix_send_file"]
    ip_regexp_file = conf_dict['network']["ip_regexp"]

    zabbix_sender = conf_dict["common"]["zabbix_sender"]
    zabbix_conf = conf_dict["common"]["zabbix_conf"]

    log_file = conf_dict["common"]["log_file"]

    cal_result = {}
    cal_dict_now = cal_info(net_info_dict, eth_info_dict, ip_regexp_file)

    monitor_key, monitor_value = check_readonly(conf_dict)

    try:
        file_handle = file(json_output_file)
        cal_dict_last = json.load(file_handle)["last"]
    except:
        pass
    else:
        import platform
        hostname = platform.uname()[1]
        with open(zabbix_send_file, "w") as file_handle:
            for k, v in cal_dict_now.items():
                if k.find("traffic") != -1:
                    file_handle.write("%s %s %d\n" % (hostname, k, (
                        int(v) - int(cal_dict_last[k])) * 8 / 60))
                else:
                    file_handle.write("%s %s %d\n" % (
                        hostname, k, int(v) - int(cal_dict_last[k])))
            file_handle.write("%s %s %d\n" %
                              (hostname, monitor_key, monitor_value))
        cmd = "%s -c %s -i %s" % (zabbix_sender, zabbix_conf, zabbix_send_file)
        status, output = commands.getstatusoutput(cmd)
        if time.strftime("%H") == '23':
            type = 'w'
        else:
            type = 'a'
        with open(log_file, type) as f_h:
            f_h.write("%s %d,%s\n" % (time.ctime(), status, output))

    cal_result["last"] = cal_dict_now
    json_str = json.dumps(cal_result)

    with open(json_output_file, "w") as f:
        f.write(json_str) 
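
The traffic branch above turns 60-second byte-counter deltas into an average bit rate with (now - last) * 8 / 60. A small sketch of that arithmetic with made-up counter values:

last_bytes = 1000000    # previous sample of a traffic counter
now_bytes = 8500000     # current sample, taken 60 seconds later

bits_per_second = (now_bytes - last_bytes) * 8 // 60
print(bits_per_second)  # 1000000, i.e. an average of ~1 Mbit/s over the minute
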
Example 73
Project: kat   Author: TOT0RoKR   File: controller.py    MIT License 4 votes vote down vote up
def initializeKAT(configPath):
    config = Katconfig(configPath)
    kernel_root_dir = vim.vars['KATRootDir'].decode()
    database = kernel_root_dir + '/' + "kat.database"
    katref = kernel_root_dir + '/' + "kat.ref"

    cc_tags = []
    pp_tags = []


    with open(kernel_root_dir + '/.config', "r") as f:
        raw_data = f.read()
    tokens = ccs.scan(raw_data)
    cc_tags = ccp.parse(tokens)
    for it in cc_tags:
        if it.name in global_tags['curconfig']:
            global_tags['curconfig'][it.name].append(it)
        else:
            global_tags['curconfig'][it.name] = [it]

    files = []
    for it in config.files:
        files.append(File(it, kernel_root_dir + '/'))
    files = sorted(files, key=lambda x: x.path)
    katconfig['files'] = files

    # read database
    if os.path.exists(database):
        #  with open(database, "rb") as f:
        f = open(database, "rb")
        katref_time = pickle.load(f)
        if katref_time != time.ctime(os.path.getmtime(katref)):
            f.close()
            pp_tags = initialize_database(katconfig)
        else:
            katref_data = pickle.load(f)
            f.close()
            pp_tags = katref_data
    else:
        pp_tags = initialize_database(katconfig)
    
    #  pp_tags = sorted(pp_tags, key=lambda x: x.path.path)
    for it in pp_tags:
        if it.name in global_tags['preprocess']:
            global_tags['preprocess'][it.name].append(it)
        else:
            global_tags['preprocess'][it.name] = [it]
    #  print("files load success")

    kconfigs = []
    for it in config.kconfigs:
        kconfigs.append(File(it, vim.vars['KATRootDir'].decode() + '/'))
    kconfigs = sorted(kconfigs, key=lambda x: x.path)
    katconfig['kconfigs'] = kconfigs
    #  print("kconfigs load success")
    vim.vars['CompletedLoad'] = True
    initialize_tab() 
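
The database branch above keys its cache on time.ctime(os.path.getmtime(katref)) and rebuilds whenever the stored stamp no longer matches. A minimal sketch of that freshness check in isolation, with hypothetical file names:

import os
import pickle
import time

ref_path = "kat.ref"         # hypothetical reference file
cache_path = "kat.database"  # hypothetical pickled cache

def cache_is_fresh():
    """Return True only if the pickled ctime stamp matches ref_path's current mtime."""
    if not (os.path.exists(cache_path) and os.path.exists(ref_path)):
        return False
    with open(cache_path, "rb") as f:
        stored_stamp = pickle.load(f)
    return stored_stamp == time.ctime(os.path.getmtime(ref_path))
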
Example 74
Project: hacking-tools   Author: girishramnani   File: _pfish_tools.py    MIT License 4 votes vote down vote up
def HashFile(theFile,simplename,o_result):
    if os.path.exists(theFile):
        if os.path.isfile(theFile):
            try:
                f=open(theFile,'rb')
            except IOError:
                log.warning("open failed :"+theFile)
                return
            else:
                try:
                    rd = f.read()
                except IOError:
                    f.close()
                    log.warning("read failed:"+theFile)
                    return

                else:
                    theFileStats=os.stat(theFile)
                    (mode,ino,dev,nlink,uid,gid,size,atime,mtime,ctime)=os.stat(theFile)
                    log.info("Processing File: "+theFile)
                    fileSize = str(size)
                    modifiedTIme= time.ctime(mtime)
                    accessTime = time.ctime(atime)
                    createdTime = time.ctime(ctime)
                    ownerID = str(uid)
                    groupID = str(gid)
                    fileMode = bin(mode)

                    if gl_args.md5:
                        hashout = hashlib.md5()
                        hashout.update(rd)
                        hexmd5 = hashout.hexdigest()
                        hashValue = hexmd5.upper()
                    elif gl_args.sha256:
                        hashout = hashlib.sha256()
                        hashout.update(rd)
                        hexsha256 = hashout.hexdigest()
                        hashValue = hexsha256.upper()
                    elif gl_args.sha512:
                        hashout = hashlib.sha512()
                        hashout.update(rd)
                        hexsha512 = hashout.hexdigest()
                        hashValue = hexsha512.upper()
                    else:
                        log.error("hash not Selected")

                    f.close()
                    o_result.writeCSVrow(simplename,theFile,fileSize,modifiedTIme,accessTime,createdTime,hashValue,ownerID,groupID,mode)
                    return True

        else:
            log.warning("cannot read the file :"+theFile)
            return False
    else:
        log.warning("not a file"+theFile)
        return False 
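
The three time.ctime() calls above only format the os.stat() timestamps into human-readable strings. A small self-contained illustration, using the running script itself as the target file:

import os
import time

stats = os.stat(__file__)
print("modified:", time.ctime(stats.st_mtime))
print("accessed:", time.ctime(stats.st_atime))
print("changed: ", time.ctime(stats.st_ctime))  # inode-change time on Unix, creation time on Windows
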
Example 75
Project: Repobot   Author: Desgard   File: generator.py    MIT License 4 votes vote down vote up
def flatten(self, msg, unixfrom=False, linesep=None):
        r"""Print the message object tree rooted at msg to the output file
        specified when the Generator instance was created.

        unixfrom is a flag that forces the printing of a Unix From_ delimiter
        before the first object in the message tree.  If the original message
        has no From_ delimiter, a `standard' one is crafted.  By default, this
        is False to inhibit the printing of any From_ delimiter.

        Note that for subobjects, no From_ line is printed.

        linesep specifies the characters used to indicate a new line in
        the output.  The default value is determined by the policy.

        """
        # We use the _XXX constants for operating on data that comes directly
        # from the msg, and _encoded_XXX constants for operating on data that
        # has already been converted (to bytes in the BytesGenerator) and
        # inserted into a temporary buffer.
        policy = msg.policy if self.policy is None else self.policy
        if linesep is not None:
            policy = policy.clone(linesep=linesep)
        if self.maxheaderlen is not None:
            policy = policy.clone(max_line_length=self.maxheaderlen)
        self._NL = policy.linesep
        self._encoded_NL = self._encode(self._NL)
        self._EMPTY = ''
        self._encoded_EMTPY = self._encode('')
        # Because we use clone (below) when we recursively process message
        # subparts, and because clone uses the computed policy (not None),
        # submessages will automatically get set to the computed policy when
        # they are processed by this code.
        old_gen_policy = self.policy
        old_msg_policy = msg.policy
        try:
            self.policy = policy
            msg.policy = policy
            if unixfrom:
                ufrom = msg.get_unixfrom()
                if not ufrom:
                    ufrom = 'From nobody ' + time.ctime(time.time())
                self.write(ufrom + self._NL)
            self._write(msg)
        finally:
            self.policy = old_gen_policy
            msg.policy = old_msg_policy 
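
The method above mirrors the standard library's email.generator.Generator.flatten, where time.ctime() supplies the timestamp for a crafted Unix From_ line. A minimal sketch using the stdlib class directly:

import io
from email.generator import Generator
from email.message import Message

msg = Message()
msg['Subject'] = 'ctime demo'
msg.set_payload('hello')

buf = io.StringIO()
# With unixfrom=True and no From_ delimiter on the message, a
# "From nobody <time.ctime()>" envelope line is crafted and written first.
Generator(buf).flatten(msg, unixfrom=True)
print(buf.getvalue().splitlines()[0])  # e.g. "From nobody Tue Jun  4 09:12:33 2024"
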
Example 76
Project: gusto   Author: firedrakeproject   File: state.py    MIT License 4 votes vote down vote up
def __init__(self, filename, ndt, field_points, description,
                 field_creator, comm, create=True):
        """Create a dump file that stores fields evaluated at points.

        :arg filename: The filename.
        :arg field_points: Iterable of pairs (field_name, evaluation_points).
        :arg description: Description of the simulation.
        :arg field_creator: The field creator (only used to determine
            datatype and shape of fields).
        :kwarg create: If False, assume that filename already exists
        """
        # Overwrite on creation.
        self.dump_count = 0
        self.filename = filename
        self.field_points = field_points
        self.comm = comm
        if not create:
            return
        if self.comm.rank == 0:
            with Dataset(filename, "w") as dataset:
                dataset.description = "Point data for simulation {desc}".format(desc=description)
                dataset.history = "Created {t}".format(t=time.ctime())
                # FIXME add versioning information.
                dataset.source = "Output from Gusto model"
                # Appendable dimension, timesteps in the model
                dataset.createDimension("time", None)

                var = dataset.createVariable("time", np.float64, ("time"))
                var.units = "seconds"
                # Now create the variable group for each field
                for field_name, points in field_points:
                    group = dataset.createGroup(field_name)
                    npts, dim = points.shape
                    group.createDimension("points", npts)
                    group.createDimension("geometric_dimension", dim)
                    var = group.createVariable("points", points.dtype,
                                               ("points", "geometric_dimension"))
                    var[:] = points

                    # Get the UFL shape of the field
                    field_shape = field_creator(field_name).ufl_shape
                    # Number of geometric dimension occurrences should be the same as the length of the UFL shape
                    field_len = len(field_shape)
                    field_count = field_shape.count(dim)
                    assert field_len == field_count, "Geometric dimension occurrences do not match UFL shape"
                    # Create the variable with the required shape
                    dimensions = ("time", "points") + field_count*("geometric_dimension",)
                    group.createVariable(field_name, field_creator(field_name).dat.dtype, dimensions) 
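
The constructor above stamps the netCDF file's history attribute with time.ctime(). A minimal sketch of that part alone, assuming the same netCDF4 package is installed and writing a throwaway file:

import time
from netCDF4 import Dataset  # same package the example imports Dataset from

with Dataset("points_demo.nc", "w") as ds:  # hypothetical output file
    ds.description = "Point data for simulation demo"
    ds.history = "Created {t}".format(t=time.ctime())
    ds.createDimension("time", None)              # appendable time dimension
    ds.createVariable("time", "f8", ("time",))    # float64 time coordinate, as in the example
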
Example 77
Project: DRCOG_Urbansim   Author: apdjustino   File: spotproforma.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def lookup(self,form,rents,land_costs,parcel_sizes):

  print form, time.ctime()

  rents = np.dot(rents,forms[form]) # get weighted rent for this form
  print rents.mean()

  even_rents = self.min_even_rents_d[form]
  # print "sqft cost\n", even_rents

  #building_bulks = np.reshape(parcel_sizes,(-1,1))*np.reshape(even_rents.index,(1,-1)) # parcel sizes * possible fars
  building_bulks = np.dot(np.reshape(parcel_sizes,(-1,1)), np.reshape(even_rents.index,(1,-1)))

  building_costs = building_bulks * np.reshape(even_rents.values,(1,-1)) / INTERESTRATE # cost to build the new building

  building_costs += np.reshape(land_costs.values,(-1,1)) / INTERESTRATE # add cost to buy the current building

  building_revenue = building_bulks * np.reshape(rents,(-1,1)) / INTERESTRATE # rent to make for the new building


  profit = building_revenue - building_costs # profit for each form //had to get rid of .values because of new numpy



  #### XG: Use a mask, because otherwise nan mess up the optimization
  profitm = np.ma.array(profit, mask=np.isnan(profit))
  maxprofitind = np.argmax(profitm,axis=1) # index maximum total profit
  #### end: XG

  maxprofit = profit[np.arange(maxprofitind.size),maxprofitind] # value of the maximum total profit
  maxprofit_fars = pd.DataFrame(even_rents.index[maxprofitind].astype('float'),index=parcel_sizes.index)
  maxprofit_fars.columns=['fars']
  # far of the max profit

  #XG: use a proper slicing (with pandas 16) to make sure that unprofitable buildings are not produced.
  maxprofit = pd.DataFrame(maxprofit.astype('float'),index=parcel_sizes.index)
  maxprofit.columns=['profit']
  maxprofit.loc[maxprofit['profit']<0, 'profit'] = np.nan # remove unprofitable buildings
  maxprofit_fars.loc[np.isnan(maxprofit['profit']), 'fars']= np.nan  # remove far of unprofitable building
  maxprofit_fars['fars']=maxprofit_fars['fars'].astype('float32')

  return maxprofit_fars, maxprofit 
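
A toy numpy sketch of the bulk/cost/revenue arithmetic above, with made-up rents, costs and parcel sizes (none of these numbers come from the project):

import numpy as np

INTERESTRATE = 0.05
parcel_sizes = np.array([10000.0, 25000.0])      # square feet, two parcels
fars = np.array([1.0, 2.0, 4.0])                 # candidate floor-area ratios
cost_per_sqft = np.array([150.0, 160.0, 180.0])  # construction cost at each FAR
rents = np.array([[22.0], [30.0]])               # weighted rent per parcel

bulks = np.dot(parcel_sizes.reshape(-1, 1), fars.reshape(1, -1))  # parcel size x FAR
costs = bulks * cost_per_sqft.reshape(1, -1) / INTERESTRATE
revenue = bulks * rents / INTERESTRATE
profit = revenue - costs
best = np.argmax(profit, axis=1)   # most profitable FAR per parcel
print(fars[best])
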
Example 78
Project: DRCOG_Urbansim   Author: apdjustino   File: lcmnl.py    GNU Affero General Public License v3.0 4 votes vote down vote up
def lcmnl_estimate(cmdata,numclasses,csdata,numalts,chosen,maxiter=MAXITER,emtol=EMTOL,\
                     skipprep=False,csbeta=None,cmbeta=None):

  loglik = -999999
  if csbeta is None: csbeta = [np.random.rand(csdata.shape[1]) for i in range(numclasses)]
  if not skipprep: cmdata = prep_cm_data(cmdata,numclasses)
  if cmbeta is None: cmbeta = np.zeros(cmdata.shape[1])
  
  for i in range(maxiter):
    print "Running iteration %d" % (i+1)
    print time.ctime()

    # EXPECTATION
    print "Running class membership model"
    cmprobs = mnl.mnl_simulate(cmdata,cmbeta,numclasses,GPU=GPU,returnprobs=1)

    csprobs = []
    for cno in range(numclasses):
      tmp = mnl.mnl_simulate(csdata,csbeta[cno],numalts,GPU=GPU,returnprobs=1)
      tmp = np.sum(tmp*chosen,axis=1) # keep only chosen probs
      csprobs.append(np.reshape(tmp,(-1,1)))
    csprobs = np.concatenate(csprobs,axis=1)

    h = csprobs * cmprobs
    oldloglik = loglik
    loglik = np.sum(np.log(np.sum(h,axis=1)))
    print "current csbeta", csbeta
    print "current cmbeta", cmbeta
    print "current loglik", loglik, i+1, "\n\n"
    if abs(loglik-oldloglik) < emtol: break
    wts = h / np.reshape(np.sum(h,axis=1),(-1,1))
   
    # MAXIMIZATION

    for cno in range(numclasses):
      print "Estimating class specific model for class %d" % (cno+1)
      t1 =  time.time()
      weights=np.reshape(wts[:,cno],(-1,1))
      print weights.shape
      fit, results  = mnl.mnl_estimate(csdata,chosen,numalts,GPU=GPU,weights=weights,beta=csbeta[cno])
      print "Finished in %fs" % (time.time()-t1)
      csbeta[cno] = zip(*results)[0]
    
    print "Estimating class membership model"
    t1 =  time.time()
    fit, results = mnl.mnl_estimate(cmdata,None,numclasses,GPU=GPU,weights=wts,lcgrad=True, \
                                             beta=cmbeta,coeffrange=(-1000,1000))
    print "Finished in %fs" % (time.time()-t1)
    cmbeta = zip(*results)[0] 
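
Inside the loop, time.ctime() just stamps each EM iteration; the expectation step itself reduces to the joint probabilities h and the responsibilities wts. A tiny sketch of that step with hypothetical probabilities:

import numpy as np

# rows = observations, columns = latent classes; values are hypothetical
# products of class-specific choice probability and class-membership probability
h = np.array([[0.02, 0.08],
              [0.09, 0.01]])

loglik = np.sum(np.log(np.sum(h, axis=1)))         # total log-likelihood
wts = h / np.reshape(np.sum(h, axis=1), (-1, 1))   # per-class responsibilities
print(loglik)
print(wts)
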
Example 79
Project: asn1tools   Author: eerimoq   File: __init__.py    MIT License 4 votes vote down vote up
def generate(compiled,
             codec,
             namespace,
             header_name,
             source_name,
             fuzzer_source_name):
    """Generate C source code from given compiled specification.

    `namespace` is used as a prefix for all defines, data structures
    and functions.

    `header_name` is the file name of the C header file, which is
    included by the C source file.

    `source_name` is the file name of the C source file, which is
    needed by the fuzzer makefile.

    `fuzzer_source_name` is the file name of the C fuzzer source file,
    which is needed by the fuzzer makefile.

    This function returns a tuple of the C header, C source, fuzzer
    source and fuzzer makefile contents as strings.

    """

    date = time.ctime()
    namespace = camel_to_snake_case(namespace)
    include_guard = '{}_H'.format(namespace.upper())

    if codec == 'oer':
        structs, declarations, helpers, definitions = oer.generate(
            compiled,
            namespace)
    elif codec == 'uper':
        structs, declarations, helpers, definitions = uper.generate(
            compiled,
            namespace)
    else:
        raise Exception()

    header = HEADER_FMT.format(version=__version__,
                               date=date,
                               include_guard=include_guard,
                               structs=structs,
                               declarations=declarations)

    source = SOURCE_FMT.format(version=__version__,
                               date=date,
                               header=header_name,
                               helpers=helpers,
                               definitions=definitions)

    fuzzer_source, fuzzer_makefile = _generate_fuzzer_source(
        namespace,
        compiled,
        date,
        header_name,
        source_name,
        fuzzer_source_name)

    return header, source, fuzzer_source, fuzzer_makefile 
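
Here time.ctime() only provides the generation date that is substituted into the header and source templates. A small sketch of the preamble pieces with a hypothetical namespace and a stand-in template (HEADER_FMT itself is not shown in the example):

import time

namespace = 'my_protocol'  # assumed, already snake_case
date = time.ctime()
include_guard = '{}_H'.format(namespace.upper())

header_stub = ('/* Generated {date}. */\n'
               '#ifndef {guard}\n'
               '#define {guard}\n'
               '\n'
               '#endif\n').format(date=date, guard=include_guard)
print(header_stub)
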
Example 80
Project: Miscellaneous   Author: Shinpachi8   File: gitinfo_new.py    Apache License 2.0 4 votes vote down vote up
def gitinfo_scan():

    thread_pool = ThreadPoolManager(8)
    Cookie = login()
    if Cookie is None:
        return

    headers["Cookie"] = Cookie
    htmlSummaryList = [
       "https://github.com/search?o=desc&p={}&q=smtp+pass+mail&l=Java&s=indexed&type=Code&utf8=%E2%9C%93&_pjax=%23js-pjax-container",
       "https://github.com/search?o=desc&p={}&q=smtp+pass+mail&l=Python&s=indexed&type=Code&utf8=%E2%9C%93&_pjax=%23js-pjax-container",
       "https://github.com/search?o=desc&p={}&q=smtp+pass+mail&l=PHP&s=indexed&type=Code&utf8=%E2%9C%93&_pjax=%23js-pjax-container",
       "https://github.com/search?o=desc&p={}&q=smtp+pass+mail&l=INI&s=indexed&type=Code&utf8=%E2%9C%93&_pjax=%23js-pjax-container",
        ]

    # qq_163_mails = set()
    for index, html in enumerate(htmlSummaryList):
        count_add_queue = 0
        x = range(1,80)
        # random.shuffle(x)
        for i in x:
            logging.info("[fetching] " + html.format(i))
            #global headers
            headers["Referer"] = html.format(i)
            # logging.info("request.headers = {}".format(headers))
            try :
                htmlSummary = getHtmlSummary(html.format(i))

                if "You have triggered an abuse detection mechanism." in htmlSummary and "Please wait a few minutes before you try again" in htmlSummary:
                    logging.info("your ip has been baned by github")
                    time.sleep(60*3)
                urllist = getHtmlurl(htmlSummary)
            except Exception as e:
                logging.info("[-][Error] Line 145.\t" +  repr(e))
                continue
            # print urllist
            for url in urllist:
                try:
                    if 'blob' in url:
                        url = url.split('blob/')[0] + url.split('blob/')[1]
                    else:
                        # print "blob not in: {}".format(url)
                        continue
                    url = "https://raw.githubusercontent.com" + url
                    # print "[+] Parsing Url:\t" + url
                    thread_pool.add_job(pickemail, url)
                    count_add_queue += 1

                except Exception as e:
                    logging.info("[main] [Error]" + repr(e))
            # print time.ctime() + "\tcount_add_queue:\t{}".format(count_add_queue)
            time.sleep(random.randint(1, 5))
        print "Add {} item in url: [{}]".format(count_add_queue, html.format(1))

    thread_pool.start_work()
    thread_pool.work_queue.join()
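
The commented-out progress line above is a typical ctime-stamped status print; restored here as a small standalone sketch with a hypothetical count:

import time

count_add_queue = 42  # hypothetical number of URLs queued so far
print(time.ctime() + "\tcount_add_queue:\t{}".format(count_add_queue))
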