Python os.path.getctime() Examples

The following code examples show how to use os.path.getctime(). They are taken from open source Python projects.
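Before the project examples, a minimal sketch of the call itself; the file name is hypothetical. Note that the meaning of "ctime" is platform-dependent: on Unix-like systems it is the time of the last metadata change, on Windows it is the creation time.

import os.path
from datetime import datetime, timezone

# Hypothetical file; substitute any path that exists on your system.
ctime = os.path.getctime("example.txt")  # seconds since the epoch, as a float

# Convert to an aware datetime for display.
print(datetime.fromtimestamp(ctime, tz=timezone.utc))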

Example 1
Project: fantasy-football-bot   Author: amarvin   File: utils.py    MIT License
def load():
    '''Load latest scraped data
    '''

    # Find latest csv file
    folder = join('.', 'data')
    files = [f for f in listdir(folder) if isfile(join(folder, f))]
    latest_file = max([join(folder, f) for f in files], key=getctime)
    latest_filename = split(latest_file)[1]

    # Unpack data
    week = re.findall(r'\d+', latest_filename)[-1]
    week = int(week)
    df = pd.read_csv(latest_file)

    # Return results
    return df, week 
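The same newest-file lookup can be written more compactly with glob, which several of the later examples use; a sketch, assuming a data/ directory of CSV files:

import glob
from os.path import getctime

# Hypothetical directory of scraped CSVs; glob handles the join/isfile
# bookkeeping that listdir requires above.
latest_file = max(glob.glob('data/*.csv'), key=getctime)  # ValueError if no matches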
Example 2
Project: Stock_Market_Forecast   Author: cuevas1208   File: data_Load.py    MIT License
def getFundamentalData(stock_name ="FRED/GDP"):
    """
    # Downloads data and saves it as CSV.
    # To get fundamentals you would have to pay:
    # https://www.reddit.com/r/algotrading/comments/4byj5k/is_there_a_python_script_to_get_historical
    # In my case I will be using quandl.
    # The Quandl Python module is free. If you would like to make more than 50 calls a day,
    # however, you will need to create a free Quandl account and set your API key.
    """

    # refresh the file only if it hasn't been refreshed today
    fileName = stock_name.replace("/", "_")
    filePath = "../data/" + fileName + '.csv'

    # refresh data once a day
    todayDate = datetime.now().date() 

    # if the file exists, read its creation date; else stamp yesterday's date
    if path.exists(filePath):
        fileDate = datetime.fromtimestamp(path.getctime(filePath)).date()
    else:
        fileDate = datetime.now().date() - timedelta(days=1)
        
    if todayDate > fileDate:

        # request all available data from start_date through today
        start_date = '2010-01-01'
        end_date = todayDate

        import quandl
        # use quandl.get to download the desired data
        panel_data = quandl.get(stock_name, start_date=start_date, end_date=end_date)

        panel_data.to_csv(filePath)

    else:
        print("file is updated")
               
    return filePath 
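The refresh-once-a-day logic above generalizes to any cached download. A minimal sketch of the freshness test in isolation (the function name and default age are assumptions, not part of the project):

from datetime import datetime, timedelta
from os import path

def is_stale(filepath, max_age=timedelta(days=1)):
    """Return True if filepath is missing or was created more than max_age ago."""
    if not path.exists(filepath):
        return True
    created = datetime.fromtimestamp(path.getctime(filepath))
    return datetime.now() - created > max_age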
Example 3
Project: mod   Author: wallarelvo   File: load_comp_times.py    GNU General Public License v2.0
def generate_time_df():
    cols = ["vehicles", "capacity", "waiting_time", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for v, c, wt, d in product(vehicles, caps, waiting_times, range(1, 8)):
        s, e = get_comp_filenames(v, c, wt, 0, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [v, c, wt, d - 1, diff]
        counter += 1
    return data 
Example 4
Project: mod   Author: wallarelvo   File: load_comp_times.py    GNU General Public License v2.0
def generate_interval_time_df():
    cols = ["interval", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, d in product(intervals, range(1, 8)):
        s, e = get_interval_comp_filenames(i, d)
        total_secs = (24 * 60 * 60) / i
        diff = (path.getctime(e) - path.getctime(s)) / total_secs
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, d - 1, diff]
        counter += 1
    return data 
Example 5
Project: mod   Author: wallarelvo   File: load_comp_times.py    GNU General Public License v2.0
def generate_demand_time_df():
    cols = ["demand", "capacity", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, c, d in product(demands, [1, 4], range(1, 8)):
        s, e = get_demand_comp_filenames(i, c, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, c, d - 1, diff]
        counter += 1
    return data 
Example 6
Project: mod   Author: wallarelvo   File: load_comp_times.py    GNU General Public License v2.0
def generate_hour_time_df():
    cols = ["hour", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, d in product(["same", "t12", "t19"], range(1, 8)):
        s, e = get_hour_comp_filenames(i, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, d - 1, diff]
        counter += 1
    return data 
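Examples 3 through 6 share one measurement trick: per-step computation time is estimated as the gap between the creation times of a start marker file and an end marker file, divided by the number of steps. The core of it, stripped of the DataFrame bookkeeping (names hypothetical):

from os import path

def per_step_seconds(start_file, end_file, n_steps):
    """Estimate seconds per step from the ctime gap between two marker files."""
    return (path.getctime(end_file) - path.getctime(start_file)) / n_steps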
Example 7
Project: elite_bgs_companion   Author: breca   File: BGS Companion.py    GNU General Public License v3.0
def monitor_journal(self):
        while not self.stopped():
            self.newfile = False
            try:
                newest = max(glob.iglob(path.join(runtime['journal_path'], '*.log')), key=path.getctime)

                if runtime['journal_file'] != newest:
                    runtime['journal_file'] = newest
                    self.msgqueue.put('New journal detected. Opening file.')
                    sanitized_name = newest.split('\\')[-1]
                    log.info('JournalMonitor found new file: ' + sanitized_name)
                    self.journal_file = open(newest, 'r')
                    log.debug('JournalMonitor resetting offset value.')
                    self.journal_byte_offset = 0
                    self.newfile = True
                elif not self.newfile:
                    if not hasattr(self, 'journal_byte_offset'):
                        self.journal_byte_offset = runtime['journal_byte_offset']
                        log.debug('JournalMonitor using offset ' + str(self.journal_byte_offset)) ##DEBUG
                    if not hasattr(self, 'journal_file'):
                        log.info('JournalMonitor opening old journal.')
                        self.journal_file = open(newest, 'r')
                        log.debug('JournalMonitor moving to previous offset.')
                        self.journal_file.seek(self.journal_byte_offset)
            except ValueError:
                msg = 'Could not find journal files at "{}".'.format(runtime['journal_path'])
                log.error(msg)
                self.msgqueue.put(msg)
            except Exception as e:
                log.exception('JournalMonitor hit exception checking for file: %s', e)

            try:
                file_data = self.journal_file.readline()
                self.journal_byte_offset = self.journal_file.tell()
                self.parse(file_data)
                sleep(0.05)
            except Exception as e:
                log.exception('Exception occurred parsing JSON: %s', e)
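The heart of the monitor, selecting the most recently created journal by ctime, can be isolated from the threading and offset bookkeeping; a sketch with a hypothetical directory argument:

import glob
from os import path

def newest_journal(journal_dir):
    """Return the newest *.log file in journal_dir, or None if there are none."""
    logs = glob.iglob(path.join(journal_dir, '*.log'))
    return max(logs, default=None, key=path.getctime)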
Example 8
Project: elite_bgs_companion   Author: breca   File: eddn_sender.py    GNU General Public License v3.0
def shipyard_monitor(self):
        try:
            filename = max(glob.iglob(path.join(self.journal_path, 'Shipyard.json')), key=path.getctime)
            with open(filename, 'r') as f:
                _list = f.readlines() # Frontier decided to put newlines everywhere so we get a list
                _lines = ''.join(_list)
                _json = json.loads(_lines)

            # see how stale this timestamp is
            timestamp_now = datetime.utcnow()
            timestamp_file = datetime.strptime(_json['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
            # subtract now from timestamp in file
            # if there's more than 5 seconds between them, ignore
            timestamp_drift = timestamp_now - timestamp_file

            #log.debug('EDDN Dispatcher calculated data timestamp drift between json and realtime for Shipyard.json: {}'.format(timestamp_drift.total_seconds())) #DEBUG
            if timestamp_drift.total_seconds() <= 5:
                # Skip if this data matches data previously stored in this class
                if getattr(self, 'old_shipyard_lines', None) == _json:
                    #log.debug('EDDN Dispatcher has already processed this Shipyard.json. Ignoring.')
                    return
                # We'll store the json, just to try and ensure we don't double up or spam data
                self.old_shipyard_lines = _json
                # Construct and send the payload
                self.send_shipyard(_json)
            #else:
                #log.debug('EDDN Dispatcher found stale Shipyard data.')
        except Exception as e:
            log.exception('EDDN Dispatcher encountered error trying to post Shipyard data: %s', e)


    # DID YOU JUST ACCUSE ME OF COPYING AND PASTING BECAUSE I WAS TOO LAZY TO WRITE A SMALL FUNCTION?! 
Example 9
Project: elite_bgs_companion   Author: breca   File: eddn_sender.py    GNU General Public License v3.0
def outfitting_monitor(self):
        try:
            filename = max(glob.iglob(path.join(self.journal_path, 'Outfitting.json')), key=path.getctime)
            with open(filename, 'r') as f:
                _list = f.readlines() # Frontier decided to put newlines everywhere so we get a list
                _lines = ''.join(_list)
                _json = json.loads(_lines)

            # see how stale this timestamp is
            timestamp_now = datetime.utcnow()
            timestamp_file = datetime.strptime(_json['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
            # subtract now from timestamp in file
            # if there's more than 5 seconds between them, ignore
            timestamp_drift = timestamp_now - timestamp_file

            #log.debug('EDDN Dispatcher calculated data timestamp drift between json and realtime for Outfitting.json: {}'.format(timestamp_drift.total_seconds())) #DEBUG
            if timestamp_drift.total_seconds() <= 5:
                # Skip if this data matches data previously stored in this class
                if getattr(self, 'old_outfitting_lines', None) == _json:
                    #log.debug('EDDN Dispatcher has already processed this Outfitting.json. Ignoring.')
                    return
                # We'll store the json, just to try and ensure we don't double up or spam data
                self.old_outfitting_lines = _json
                # Construct and send the payload
                self.send_outfitting(_json)
            # else:
            #     log.debug('EDDN Dispatcher found stale Outfitting data.')
        except Exception as e:
            log.exception('EDDN Dispatcher encountered error trying to post Outfitting data: %s', e)


    # DID YOU JUST ACCUSE ME OF COPYING AND PASTING BECAUSE I WAS TOO LAZY TO WRITE A SMALL FUNCTION?! AGAIN?! 
Example 10
Project: elite_bgs_companion   Author: breca   File: eddn_sender.py    GNU General Public License v3.0
def market_monitor(self):
        try:
            filename = max(glob.iglob(path.join(self.journal_path, 'Market.json')), key=path.getctime)
            with open(filename, 'r') as f:
                _list = f.readlines() # Frontier decided to put newlines everywhere so we get a list
                _lines = ''.join(_list)
                _json = json.loads(_lines)

            # see how stale this timestamp is
            timestamp_now = datetime.utcnow()
            timestamp_file = datetime.strptime(_json['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
            # subtract now from timestamp in file
            # if there's more than 5 seconds between them, ignore
            timestamp_drift = timestamp_now - timestamp_file

            #log.debug('EDDN Dispatcher calculated data timestamp drift between json and realtime for Market.json: {}'.format(timestamp_drift.total_seconds())) #DEBUG
            if timestamp_drift.total_seconds() <= 5:
                # Skip if this data matches data previously stored in this class
                if getattr(self, 'old_market_lines', None) == _json:
                    #log.debug('EDDN Dispatcher has already processed this Market.json. Ignoring.')
                    return
                # We'll store the json, just to try and ensure we don't double up or spam data
                self.old_market_lines = _json
                # Construct and send the payload
                self.send_market(_json)
            #else:
                #log.debug('EDDN Dispatcher found stale Market data.')
        except Exception as e:
            log.exception('EDDN Dispatcher encountered error trying to post Market data: %s', e)
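Examples 8 through 10 differ only in the file name, the cache attribute, and the send method, something the author's own comments acknowledge. One way the shared logic could be factored out; this is a sketch, not part of the project, and the helper name is an assumption:

import json
from datetime import datetime
from os import path

def process_snapshot(self, filename, cache_attr, send, max_drift=5):
    """Load a journal JSON snapshot, then send it unless it is stale or repeated."""
    with open(path.join(self.journal_path, filename), 'r') as f:
        data = json.loads(f.read())

    file_time = datetime.strptime(data['timestamp'], '%Y-%m-%dT%H:%M:%SZ')
    if (datetime.utcnow() - file_time).total_seconds() > max_drift:
        return  # stale snapshot
    if getattr(self, cache_attr, None) == data:
        return  # already processed
    setattr(self, cache_attr, data)
    send(data)

Each monitor then reduces to a single call such as process_snapshot(self, 'Market.json', 'old_market_lines', self.send_market).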
Example 11
Project: analyzePF   Author: analyzeDFIR   File: prefetch.py    MIT License
def get_metadata(self, simple_hash=True):
        '''
        Args:
            simple_hash: Boolean    => whether to only collect SHA256 hash or 
                                       MD5 and SHA1 as well
        Returns:
            Container<String, Any>
            Container of metadata about this prefetch file:
                file_name: prefetch file name
                file_path: full path on local system
                file_size: size of file on local system
                md5hash: MD5 hash of prefetch file
                sha1hash: SHA1 hash of prefetch file
                sha2hash: SHA256 hash of prefetch file
                modify_time: last modification time of prefetch file on local system
                access_time: last access time of prefetch file on local system
                create_time: create time of prefetch file on local system
        Preconditions:
            simple_hash is of type Boolean
        '''
        assert isinstance(simple_hash, bool), 'simple_hash must be of type bool'
        return Container(
            file_name=path.basename(self._filepath),
            file_path=path.abspath(self._filepath),
            file_size=path.getsize(self._filepath),
            md5hash=self._hash_file('md5') if not simple_hash else None,
            sha1hash=self._hash_file('sha1') if not simple_hash else None,
            sha2hash=self._hash_file('sha256'),
            modify_time=datetime.fromtimestamp(path.getmtime(self._filepath), tzlocal()).astimezone(tzutc()),
            access_time=datetime.fromtimestamp(path.getatime(self._filepath), tzlocal()).astimezone(tzutc()),
            create_time=datetime.fromtimestamp(path.getctime(self._filepath), tzlocal()).astimezone(tzutc())
        ) 
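Each of getmtime(), getatime(), and getctime() issues its own stat() call, so code that needs all three timestamps, like the example above, can use a single os.stat() instead. A minimal sketch:

import os
from datetime import datetime, timezone

def file_times_utc(filepath):
    """Return (modify, access, create/metadata-change) times as aware UTC datetimes."""
    st = os.stat(filepath)  # one system call instead of three
    return tuple(datetime.fromtimestamp(t, tz=timezone.utc)
                 for t in (st.st_mtime, st.st_atime, st.st_ctime))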
Example 12
Project: clist   Author: aropan   File: requester.py    Apache License 2.0
def __del__(self):
        if not isdir(self.dir_cache):
            return

        for file_cache in listdir(self.dir_cache):
            diff_time = datetime.now() - datetime.fromtimestamp(getctime(self.dir_cache + file_cache))
            if diff_time.total_seconds() >= self.cache_timeout:
                remove(self.dir_cache + file_cache) 
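The same eviction idea can be written against os.scandir(), so each directory entry carries its own stat result; a sketch, with a hypothetical directory argument and default age:

import os
from datetime import datetime, timedelta

def evict_older_than(cache_dir, max_age=timedelta(hours=1)):
    """Delete files in cache_dir whose ctime is older than max_age."""
    cutoff = datetime.now() - max_age
    for entry in os.scandir(cache_dir):
        if entry.is_file() and datetime.fromtimestamp(entry.stat().st_ctime) < cutoff:
            os.remove(entry.path)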
Example 13
Project: django-static-compress   Author: whs   File: mixin.py    MIT License
def get_created_time(self, name):
        if self.keep_original:
            return super().get_created_time(name)
        return self._datetime_from_timestamp(getctime(self.get_alternate_compressed_path(name))) 
Example 14
Project: multiuserblazeserver   Author: ContinuumIO   File: build.py    BSD 3-Clause "New" or "Revised" License
def newest_file(root):
    path = join(root, "*tar.bz2")
    newest = max(glob.iglob(path), key=getctime)
    return newest 
Example 15
Project: PyFlow   Author: wonderworks-software   File: PathLib.py    Apache License 2.0
def getctime(path=("StringPin", "", {PinSpecifires.INPUT_WIDGET_VARIANT: "PathWidget"})):
        '''Return the system’s ctime which, on some systems (like Unix) is the time of the last metadata change, and, on others (like Windows), is the creation time for path. The return value is a number giving the number of seconds since the epoch (see the time module). Raise os.error if the file does not exist or is inaccessible.'''
        return osPath.getctime(path) 
Example 16
Project: PyFlow   Author: wonderworks-software   File: PathLib.py    Apache License 2.0
def getsize(path=("StringPin", "", {PinSpecifires.INPUT_WIDGET_VARIANT: "PathWidget"})):
        '''Return the size, in bytes, of path. Raise os.error if the file does not exist or is inaccessible.'''
        return osPath.getsize(path) 
Example 17
Project: upribox   Author: usableprivacy   File: info.py    GNU General Public License v3.0
def get_update_time(self):
        try:
            from os.path import getctime
            from datetime import datetime
            self.update_utc_time = datetime.utcfromtimestamp(getctime(self.ANSIBLE_PULL_LOG_FILE))
        except Exception:
            pass
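datetime.utcfromtimestamp() returns a naive datetime and is deprecated as of Python 3.12; an aware equivalent of the conversion above, with a hypothetical log path:

from datetime import datetime, timezone
from os.path import getctime

LOG_FILE = '/var/log/ansible-pull.log'  # hypothetical path

# fromtimestamp() with an explicit tz yields a timezone-aware datetime.
update_utc_time = datetime.fromtimestamp(getctime(LOG_FILE), tz=timezone.utc)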
Example 18
Project: MishMash   Author: nicfit   File: sync.py    GNU General Public License v3.0
def handleDirectory(self, d, _):
        pout(Fg.blue("Syncing directory") + ": " + str(d))
        audio_files = list(self._file_cache)
        self._file_cache = []

        image_files = self._dir_images
        self._dir_images = []

        if not audio_files:
            return

        d_datetime = datetime.fromtimestamp(getctime(d))

        album_type = self._albumTypeHint(audio_files) or LP_TYPE

        album = None
        session = self._db_session
        for audio_file in audio_files:
            try:
                track, album = self._syncAudioFile(audio_file, album_type, d_datetime, session)
            except Exception:
                # TODO: log and skip????
                raise

        if album:
            # Directory images.
            for img_file in image_files:
                img_type = art.matchArtFile(img_file)
                if img_type is None:
                    log.warning(f"Skipping unrecognized image file: {img_file}")
                    continue

                new_img = Image.fromFile(img_file, img_type)
                if new_img:
                    new_img.description = os.path.basename(img_file)
                    syncImage(new_img,
                              album if img_type in IMAGE_TYPES["album"] else album.artist,
                              session)
                else:
                    log.warning(f"Invalid image file: {img_file}")

        session.commit()
        if self.args.monitor:
            self._watchDir(d)