Python os.path.isfile() Examples

The following are code examples for showing how to use os.path.isfile(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput_Augmented_With_Ortho.py    GNU General Public License v3.0 7 votes vote down vote up
def combineToOneFile():
#horrible function to combine all the files with stiffnesses into one file
    # Python 2 code: concatenates line 11 (index 10) of every .txt file in
    # output_directory into a single 'allStiffness.txt'.
    # Relies on module-level globals: output_directory, interfaceMod,
    # cementMod, plus listdir/isfile/join and sys -- TODO confirm imports.
    os.chdir(output_directory)
    txtList = []
    allfiles = [f for f in listdir(output_directory) if isfile(join(output_directory,f))]
    for files in allfiles:
        if files.endswith('.txt'):
            txtList.append(files)
    arrayOfStiffness = []
    o = open('allStiffness.txt', 'w')
    o.write('Interface Stiffness: ' + str(interfaceMod) + ' GPa \n')
    o.write('Cement Stiffness: ' + str(cementMod) + ' GPa \n')
    for currentfile in txtList:
        f = open(currentfile, 'r')
        i=0
        for line in f:
            if i == 10:
                # Echo to the real stdout (Python 2 print-chevron syntax);
                # presumably sys.stdout is redirected by the host app -- confirm.
                print >> sys.__stdout__, line
                # Strip '.txt' from the name; columns 0-34 of the line are
                # presumably a fixed-width label -- TODO confirm the slice.
                o.write(currentfile[:-4] + ': ' + line[35:])
                arrayOfStiffness.append(line)
            i = i + 1
        f.close()
    o.close()
Example 2
Project: Gurux.DLMS.Python   Author: Gurux   File: GXManufacturerCollection.py    GNU General Public License v2.0 6 votes vote down vote up
def isUpdatesAvailable(cls, path):
        """Return True when the manufacturer definitions on the Gurux web
        server are newer than the local copies stored under *path*.

        Python 2 is not supported (urllib.request is used), so the check
        is skipped there.  Any download or parse failure is treated as
        "updates available" so that a refresh will be attempted.
        """
        if sys.version_info < (3, 0):
            return False
        # pylint: disable=broad-except
        if not os.path.isfile(os.path.join(path, "files.xml")):
            return True
        try:
            # Map file name -> local "Modified" timestamp.
            available = dict()
            for it in ET.parse(os.path.join(path, "files.xml")).iter():
                if it.tag == "File":
                    available[it.text] = datetime.datetime.strptime(it.attrib["Modified"], "%d-%m-%Y")

            # Download the server-side index to a temporary path.  Use a
            # dedicated variable instead of rebinding the *path* parameter
            # (the original shadowed it, which was confusing).
            tmp = NamedTemporaryFile()
            tmp.close()
            urllib.request.urlretrieve("https://www.gurux.fi/obis/files.xml", tmp.name)
            for it in ET.parse(tmp.name).iter():
                if it.tag == "File":
                    modified = datetime.datetime.strptime(it.attrib["Modified"], "%d-%m-%Y")
                    # New file on the server, or a different timestamp.
                    if it.text not in available or available[it.text] != modified:
                        return True
        except Exception as e:
            # Broad on purpose: any failure means "assume updates exist".
            print(e)
            return True
        return False
Example 3
Project: Gurux.DLMS.Python   Author: Gurux   File: GXManufacturerCollection.py    GNU General Public License v2.0 6 votes vote down vote up
def readManufacturerSettings(cls, manufacturers, path):
        """Populate *manufacturers* with settings parsed from the .obx
        files found in *path*.

        The caller's list is cleared and refilled in place.  The original
        code rebound the parameter to a new local list, so the caller
        never received the parsed results.
        """
        # pylint: disable=broad-except
        del manufacturers[:]
        files = [f for f in listdir(path) if isfile(join(path, f))]
        for it in files:
            if it.endswith(".obx"):
                try:
                    manufacturers.append(cls.__parse(os.path.join(path, it)))
                except Exception as e:
                    # Skip unparsable files but keep reading the rest.
                    print(e)
                    continue

    #
    # Serialize manufacturer from the xml.
    #
    # @param in
    #            Input stream.
    # Serialized manufacturer.
    # 
Example 4
Project: PEAKachu   Author: tbischler   File: window.py    ISC License 6 votes vote down vote up
def init_libraries(self, paired_end, max_insert_size, ctr_libs,
                       exp_libs):
        """Register control and experiment libraries on this instance.

        Library names are the file basenames without extension.  Exits
        the process with status 1 when any library file is missing.
        """
        self._paired_end = paired_end
        self._max_insert_size = max_insert_size
        self._ctr_lib_list = [splitext(basename(f))[0] for f in ctr_libs]
        self._exp_lib_list = [splitext(basename(f))[0] for f in exp_libs]
        # Register every library, experiment libraries first.
        for lib_path in exp_libs + ctr_libs:
            if not isfile(lib_path):
                sys.stderr.write("ERROR: The library file {} does not exist.\n"
                                 .format(lib_path))
                sys.exit(1)
            lib_key = splitext(basename(lib_path))[0]
            self._lib_dict[lib_key] = Library(
                paired_end, max_insert_size, lib_path,
                deepcopy(self._replicon_dict))
        self._lib_names_list = list(self._lib_dict.keys())
        exp_names = '\n'.join(self._exp_lib_list)
        ctr_names = '\n'.join(self._ctr_lib_list)
        print("The following libraries were initialized:\n"
              "# Experiment libraries\n{0}\n"
              "# Control libraries\n{1}".format(exp_names, ctr_names))
Example 5
Project: PEAKachu   Author: tbischler   File: adaptive.py    ISC License 6 votes vote down vote up
def init_libraries(self, paired_end, max_insert_size, ctr_libs,
                       exp_libs):
        """Register control and experiment libraries on this instance.

        Library names are the file basenames without extension; the
        process exits with status 1 if any library file is missing.
        """
        self._paired_end = paired_end
        self._max_insert_size = max_insert_size
        self._ctr_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in ctr_libs]
        self._exp_lib_list = [splitext(basename(lib_file))[0]
                              for lib_file in exp_libs]
        # add libs to lib_dict
        for lib_file in exp_libs + ctr_libs:
            if not isfile(lib_file):
                sys.stderr.write(
                    "ERROR: The library file {} does not exist.\n".format(
                        lib_file))
                sys.exit(1)
            # Each Library gets its own deep copy of the replicon dict.
            self._lib_dict[splitext(basename(lib_file))[0]] = Library(
                paired_end, max_insert_size, lib_file,
                deepcopy(self._replicon_dict))
        self._lib_names_list = list(self._lib_dict.keys())
        print("The following libraries were initialized:\n"
              "# Experiment libraries\n{0}\n"
              "# Control libraries\n{1}".format(
                  '\n'.join(self._exp_lib_list),
                  '\n'.join(self._ctr_lib_list)))
Example 6
Project: Att-ChemdNER   Author: lingluodlut   File: utils.py    Apache License 2.0 6 votes vote down vote up
def get_perf(filename):
    '''Run the conlleval.pl perl script on *filename* and return a dict
    with precision ('p'), recall ('r') and F1 ('f1') percentages.

    Raises ValueError if conlleval produces no accuracy line.'''
    _conlleval = PREFIX + 'conlleval'
    if not isfile(_conlleval):
        # NOTE(review): the script is downloaded as 'conlleval.pl' into the
        # current directory, which does not obviously match the _conlleval
        # path that is checked and executed -- confirm against PREFIX.
        os.system('wget https://www.comp.nus.edu.sg/%7Ekanmy/courses/practicalNLP_2008/packages/conlleval.pl')
        chmod('conlleval.pl', stat.S_IRWXU) # give the execute permissions

    proc = subprocess.Popen(["perl", _conlleval], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Close the input file deterministically instead of leaking the handle.
    with open(filename) as fh:
        stdout, _ = proc.communicate(fh.read())

    out = []
    for line in stdout.split('\n'):
        if 'accuracy' in line:
            out = line.split()
            break
    # out = ['accuracy:', '16.26%;', 'precision:', '0.00%;', 'recall:', '0.00%;', 'FB1:', '0.00']
    if not out:
        # The original fell through to an obscure IndexError here.
        raise ValueError('conlleval produced no accuracy line for %s' % filename)

    precision = float(out[3][:-2])
    recall    = float(out[5][:-2])
    f1score   = float(out[7])

    return {'p':precision, 'r':recall, 'f1':f1score}
Example 7
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput_Augmented_With_Yield.py    GNU General Public License v3.0 6 votes vote down vote up
def combineToOneFile():
#horrible function to combine all the files with stiffnesses into one file
    # Python 2 code: concatenates line 11 (index 10) of every .txt file in
    # output_directory into a single 'allStiffness.txt'.
    # Relies on module-level globals: output_directory, interfaceMod,
    # cementMod, plus listdir/isfile/join and sys -- TODO confirm imports.
    os.chdir(output_directory)
    txtList = []
    allfiles = [f for f in listdir(output_directory) if isfile(join(output_directory,f))]
    for files in allfiles:
        if files.endswith('.txt'):
            txtList.append(files)
    arrayOfStiffness = []
    o = open('allStiffness.txt', 'w')
    o.write('Interface Stiffness: ' + str(interfaceMod) + ' GPa \n')
    o.write('Cement Stiffness: ' + str(cementMod) + ' GPa \n')
    for currentfile in txtList:
        f = open(currentfile, 'r')
        i=0
        for line in f:
            if i == 10:
                # Echo to the real stdout (Python 2 print-chevron syntax);
                # presumably sys.stdout is redirected by the host app -- confirm.
                print >> sys.__stdout__, line
                # Strip '.txt' from the name; columns 0-34 of the line are
                # presumably a fixed-width label -- TODO confirm the slice.
                o.write(currentfile[:-4] + ': ' + line[35:])
                arrayOfStiffness.append(line)
            i = i + 1
        f.close()
    o.close()
Example 8
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput.py    GNU General Public License v3.0 6 votes vote down vote up
def combineToOneFile():
#horrible function to combine all the files with stiffnesses into one file
    # Python 2 code: concatenates line 11 (index 10) of every .txt file in
    # output_directory into a single 'allStiffness.txt'.  Relies on
    # module-level globals: output_directory, listdir/isfile/join -- confirm.
    os.chdir(output_directory)
    txtList = []
    allfiles = [f for f in listdir(output_directory) if isfile(join(output_directory,f))]
    for files in allfiles:
        if files.endswith('.txt'):
            txtList.append(files)
    arrayOfStiffness = []
    o = open('allStiffness.txt', 'w')
    for currentfile in txtList:
        f = open(currentfile, 'r')
        i=0
        for line in f:
            if i == 10:
                # Python 2 print statement.
                print line
                # Strip '.txt'; columns 0-34 are presumably a fixed-width
                # label -- TODO confirm the slice.
                o.write(currentfile[:-4] + ': ' + line[35:])
                arrayOfStiffness.append(line)
            i = i + 1
        f.close()
    o.close()
Example 9
Project: sklearn2docker   Author: KhaledSharif   File: classes.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def __init__(self, configuration_file: dict):
        """Build a classifier wrapper from a sklearn2docker configuration.

        :param configuration_file: parsed config with "feature_names",
            "class_names" and optionally "keras_model_weights".
        :raises FileNotFoundError: if a required model file is missing.
        :raises TypeError: if the loaded object lacks the predict API.
        """
        self.expected_column_names = configuration_file["feature_names"]
        self.class_names = configuration_file["class_names"]

        # Explicit checks instead of `assert`, which is stripped under -O.
        if not isfile("/sklearn2docker/classifier.pkl"):
            raise FileNotFoundError("/sklearn2docker/classifier.pkl")

        if "keras_model_weights" in configuration_file:
            if not isfile(configuration_file["keras_model_weights"]):
                raise FileNotFoundError(configuration_file["keras_model_weights"])

            if len(self.class_names) == 2:
                self.classifier_object = KerasBinaryClassifier("/sklearn2docker/classifier.pkl", configuration_file["keras_model_weights"])
            else:
                # Multi-class Keras models are not supported.
                raise NotImplementedError()
        else:
            self.classifier_object = ScikitLearnClassifier("/sklearn2docker/classifier.pkl")

        if not (hasattr(self.classifier_object, "predict")
                and hasattr(self.classifier_object, "predict_proba")):
            raise TypeError("loaded classifier lacks predict/predict_proba")
Example 10
Project: calmjs   Author: calmjs   File: toolchain.py    GNU General Public License v2.0 6 votes vote down vote up
def compile_bundle_entry(self, spec, entry):
        """
        Handler for one entry of the bundle compile step: copies the
        entry's source file or directory into the build directory and
        returns the modpath/target/export bookkeeping for it.
        """

        modname, source, target, modpath = entry
        bundled_modpath = {modname: modpath}
        bundled_target = {modname: target}
        export_module_name = []
        build_dir = spec[BUILD_DIR]
        if isfile(source):
            # Plain file: ensure the parent directory exists, then copy.
            export_module_name.append(modname)
            copy_target = join(build_dir, target)
            parent = dirname(copy_target)
            if not exists(parent):
                makedirs(parent)
            shutil.copy(source, copy_target)
        elif isdir(source):
            # Directory: copy the whole tree under the module name.
            shutil.copytree(source, join(build_dir, modname))

        return bundled_modpath, bundled_target, export_module_name
Example 11
Project: rowgenerators   Author: Metatab   File: old_test_basic.py    MIT License 6 votes vote down vote up
def test_d_and_c(self):
            from csv import DictReader
            from old.fetch import download_and_cache

            from os.path import isfile

            cache = TempFS()

            # Every source listed in sources.csv should download into the
            # cache and yield an existing file at d['sys_path'].
            with open(data_path('sources.csv')) as f:
                reader = DictReader(f)
                for row in reader:
                    try:
                        d = download_and_cache(SourceSpec(**row), cache)
                    except ModuleNotFoundError:
                        # For when metatab isn't installed.
                        continue
                    self.assertTrue(isfile(d['sys_path']))
Example 12
Project: openhatch   Author: campbe13   File: test_html_formatter.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def test_external_css(self):
        """Stylesheets requested via cssfile must be written next to the
        output (relative path) or at the given absolute path."""
        # Relative cssfile: written into the output file's directory.
        fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
        # Absolute cssfile: written into TESTDIR.
        fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
                             outencoding='utf-8')
        tfile = tempfile.NamedTemporaryFile(suffix='.html')
        fmt1.format(tokensource, tfile)
        try:
            fmt2.format(tokensource, tfile)
            self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
        except IOError:
            # TESTDIR may not be writable; that is acceptable here.
            pass
        tfile.close()

        css1 = join(dirname(tfile.name), 'fmt1.css')
        self.assertTrue(isfile(css1))
        os.unlink(css1)
        try:
            os.unlink(join(TESTDIR, 'fmt2.css'))
        except OSError:
            pass
Example 13
Project: aridi   Author: dpgon   File: gathering0.py    GNU General Public License v3.0 6 votes vote down vote up
def loadports(file):
        """Load "name|port[|description]" lines from *file* into
        Precheck.portnames, keyed by integer port number.

        Returns True on success, False when the file is missing or any
        line cannot be parsed.
        """
        try:
            if isfile(file):
                with open(file, "r") as f:
                    portlist = f.readlines()
                for item in portlist:
                    item = item.strip().split("|")
                    # Compare against the integer key that is actually
                    # stored; the original tested the *string* against int
                    # keys, so the duplicate check never matched.
                    if int(item[1]) not in Precheck.portnames:
                        if len(item) > 2:
                            Precheck.portnames[int(item[1])] = [item[0], item[2]]
                        else:
                            Precheck.portnames[int(item[1])] = [item[0], ""]
                return True
            else:
                return False
        except Exception:
            # Narrowed from a bare `except:`; malformed line or unreadable
            # file reports failure without masking SystemExit and friends.
            return False
Example 14
Project: recipe-box   Author: rtlee9   File: get_pictures.py    MIT License 6 votes vote down vote up
def save_picture(recipes_raw, url):
    """Download and save the picture for the recipe at *url*, if it has a
    picture link and the image file does not already exist.

    Best effort: download failures are reported and swallowed.
    """
    recipe = recipes_raw[url]
    path_save = path.join(
        config.path_img, '{}.jpg'.format(URL_to_filename(url)))
    if path.isfile(path_save):
        return
    link = recipe.get('picture_link')
    if link is None:
        return
    try:
        if 'epicurious' in url:
            # Epicurious links are protocol-relative ("//..."); prepend https.
            img_url = 'https://{}'.format(link[2:])
            urllib.request.urlretrieve(img_url, path_save)
        else:
            urllib.request.urlretrieve(link, path_save)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; the download stays best-effort.
        print('Could not download image from {}'.format(link))
Example 15
Project: Authenticator   Author: bilelmoussaoui   File: qr_reader.py    GNU General Public License v2.0 6 votes vote down vote up
def read(self):
        """Decode the QR-code image at self.filename and return the value
        of its 'secret' query parameter (presumably an otpauth:// URI --
        confirm), or None on failure."""
        try:
            from PIL import Image
            from pyzbar.pyzbar import decode
            decoded_data = decode(Image.open(self.filename))
            # The image file is deleted as soon as it has been decoded.
            if path.isfile(self.filename):
                remove(self.filename)
            try:
                url = urlparse(decoded_data[0].data.decode())
                query_params = parse_qsl(url.query)
                self._codes = dict(query_params)
                return self._codes.get("secret")
            except (KeyError, IndexError):
                # decode() found no QR payload in the image.
                Logger.error("Invalid QR image")
                return None
        except ImportError:
            # Optional dependencies missing: disable QR scanning globally.
            # NOTE(review): this path returns None implicitly.
            from ..application import Application
            Application.USE_QRSCANNER = False
            QRReader.ZBAR_FOUND = False
Example 16
Project: razzy-spinner   Author: rafasashi   File: senna.py    GNU General Public License v3.0 6 votes vote down vote up
def __init__(self, senna_path, operations, encoding='utf-8'):
        """Locate the senna executable and remember the requested operations.

        The binary is looked for under *senna_path* first and then under
        the SENNA environment variable; OSError is raised when neither
        location contains it.
        """
        self._encoding = encoding
        self._path = path.normpath(senna_path) + sep

        # Primary location: the path handed to the constructor.
        exe_file_1 = self.executable(self._path)
        if not path.isfile(exe_file_1):
            # Fallback: the SENNA environment variable, when set.
            if 'SENNA' in environ:
                self._path = path.normpath(environ['SENNA']) + sep
                exe_file_2 = self.executable(self._path)
                if not path.isfile(exe_file_2):
                    raise OSError("Senna executable expected at %s or %s but not found" % (exe_file_1,exe_file_2))

        self.operations = operations
Example 17
Project: lineflow   Author: tofunlp   File: imdb_pytorch.py    MIT License 6 votes vote down vote up
def build_vocab(tokens, cache='vocab.pkl', max_size=50000):
    """Build (or load from *cache*) a token-to-index mapping.

    The vocabulary holds the *max_size* most common tokens plus the
    PAD/UNK/START/END specials, and is pickled to *cache* so later runs
    can reuse it.
    """
    if osp.isfile(cache):
        # Cached vocabulary: just unpickle it.
        with open(cache, 'rb') as f:
            token_to_index, words = pickle.load(f)
        return token_to_index, words

    common_words, _ = zip(*Counter(tokens).most_common(max_size))
    words = [PAD_TOKEN, UNK_TOKEN] + list(common_words)
    token_to_index = dict(zip(words, range(len(words))))
    # Make sure the sequence delimiters are present exactly once.
    for special in (START_TOKEN, END_TOKEN):
        if special not in token_to_index:
            token_to_index[special] = len(token_to_index)
            words += [special]
    with open(cache, 'wb') as f:
        pickle.dump((token_to_index, words), f)

    return token_to_index, words
Example 18
Project: lineflow   Author: tofunlp   File: seq2seq_pytorch.py    MIT License 6 votes vote down vote up
def build_vocab(tokens, cache='vocab.pkl', max_size=50000):
    """Build (or load from *cache*) a token-to-index mapping.

    The vocabulary holds the *max_size* most common tokens plus the
    PAD/UNK/START/END specials, and is pickled to *cache* so later runs
    can reuse it.  Returns (token_to_index, words).
    """
    if not osp.isfile(cache):
        counter = Counter(tokens)
        words, _ = zip(*counter.most_common(max_size))
        # Specials go first so PAD gets index 0 and UNK index 1.
        words = [PAD_TOKEN, UNK_TOKEN] + list(words)
        token_to_index = dict(zip(words, range(len(words))))
        if START_TOKEN not in token_to_index:
            token_to_index[START_TOKEN] = len(token_to_index)
            words += [START_TOKEN]
        if END_TOKEN not in token_to_index:
            token_to_index[END_TOKEN] = len(token_to_index)
            words += [END_TOKEN]
        with open(cache, 'wb') as f:
            pickle.dump((token_to_index, words), f)
    else:
        # Cached vocabulary: just unpickle it.
        with open(cache, 'rb') as f:
            token_to_index, words = pickle.load(f)

    return token_to_index, words
Example 19
Project: Gurux.DLMS.Python   Author: Gurux   File: GXManufacturerCollection.py    GNU General Public License v2.0 5 votes vote down vote up
def isFirstRun(cls, path):
        """Return True when the settings directory is missing (it is then
        created) or does not yet contain files.xml."""
        if not os.path.isdir(path):
            os.mkdir(path)
            return True
        return not os.path.isfile(os.path.join(path, "files.xml"))

    #
    # Check if there are any updates available in Gurux www server.
    #
    # @param path
    #            Settings directory.
    # Returns true if there are any updates available.
    # 
Example 20
Project: leapp-repository   Author: oamg   File: sctplib.py    Apache License 2.0 5 votes vote down vote up
def anyfile(files):
    """
    Determines if any of the given paths exist and are a file.

    :return: True if any of the given paths exists and it is a file.
    :rtype: bool
    """
    def _is_regular_file(candidate):
        # isfile can raise OSError for pathological path values; treat
        # such candidates as "not a file" and keep scanning.
        try:
            return isfile(candidate)
        except OSError:
            return False

    return any(_is_regular_file(candidate) for candidate in files)
Example 21
Project: PEAKachu   Author: tbischler   File: replicons.py    ISC License 5 votes vote down vote up
def _check_annotations(self):
        if self._gff_folder is None:
            print("No folder with .gff files specified")
        else:
            gff_files = [join(self._gff_folder, f) for f in listdir(
                self._gff_folder) if isfile(join(self._gff_folder, f))]
            if not gff_files:
                print("No .gff file found in specified folder")
            else:
                for gff_file in gff_files:
                    self._store_annotations(gff_file) 
Example 22
Project: PEAKachu   Author: tbischler   File: consensus_peak.py    ISC License 5 votes vote down vote up
def _store_peaks(self):
        peak_table_folder = "{}/peak_tables".format(self._project_folder)
        peak_files = [join(peak_table_folder, f) for f in listdir(
            peak_table_folder) if isfile(join(peak_table_folder, f))]
        for peak_file in peak_files:
            peak_df = pd.read_table(peak_file, sep='\t')
            for peak in peak_df.to_dict("records"):
                self._replicon_peak_dict[peak["replicon"]][
                    peak["peak_strand"]].add(
                        (peak["peak_start"], peak["peak_end"])) 
Example 23
Project: PEAKachu   Author: tbischler   File: consensus_peak.py    ISC License 5 votes vote down vote up
def _get_peak_coverage(self):
        """Accumulate normalized wiggle coverage over consensus peaks.

        Returns a dict mapping library name -> numpy array of length
        self._consensus_length with both strands summed together.
        """
        norm_coverage_folder = "{}/normalized_coverage".format(
            self._project_folder)
        coverage_files = [join(norm_coverage_folder, f) for f in listdir(
            norm_coverage_folder) if isfile(join(norm_coverage_folder, f))]
        wiggle_parser = WiggleParser()
        cons_value_dict = defaultdict(dict)
        for coverage_file in coverage_files:
            cons_values = np.zeros(self._consensus_length)
            with open(coverage_file, 'r') as cov_fh:
                for wiggle_entry in wiggle_parser.entries(cov_fh):
                    # Track names look like "<lib>_<forward|reverse>".
                    lib_name_and_strand = wiggle_entry.track_name
                    lib_name = '_'.join(lib_name_and_strand.split('_')[:-1])
                    lib_strand = '+' if lib_name_and_strand.split(
                        '_')[-1] == "forward" else '-'
                    replicon = wiggle_entry.replicon
                    pos_value_pairs = dict(wiggle_entry.pos_value_pairs)
                    self._get_coverage_for_replicon_peaks(
                        replicon, lib_strand, pos_value_pairs, cons_values)
            # NOTE(review): lib_name/lib_strand keep the values of the *last*
            # wiggle entry of the file, so this assumes one library+strand
            # per file -- confirm; an entry-less file raises NameError here.
            cons_value_dict[lib_name][lib_strand] = cons_values
        # combine strands
        comb_cons_value_dict = {}
        for lib in cons_value_dict:
            comb_cons_value_dict[lib] = np.zeros(self._consensus_length)
            for strand in cons_value_dict[lib]:
                comb_cons_value_dict[lib] += cons_value_dict[lib][strand]
        return comb_cons_value_dict
Example 24
Project: pyblish-win   Author: pyblish   File: turtle.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def readconfig(cfgdict):
    """Read config-files, change configuration-dict accordingly.

    If there is a turtle.cfg file in the current working directory,
    read it from there. If this contains an importconfig-value,
    say 'myway', construct filename turtle_myway.cfg else use
    turtle.cfg and read it from the import-directory, where
    turtle.py is located.
    Update configuration dictionary first according to config-file,
    in the import directory, then according to config-file in the
    current working directory.
    If no config-file is found, the default configuration is used.
    """
    default_cfg = "turtle.cfg"
    cfgdict1 = {}
    cfgdict2 = {}
    if isfile(default_cfg):
        cfgdict1 = config_dict(default_cfg)
    if "importconfig" in cfgdict1:
        default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
    try:
        head, tail = split(__file__)
        cfg_file2 = join(head, default_cfg)
    except Exception:
        # __file__ may be missing (e.g. in a frozen interpreter); fall
        # back to an empty path so the isfile test below simply fails.
        cfg_file2 = ""
    if isfile(cfg_file2):
        cfgdict2 = config_dict(cfg_file2)
    # Apply the import-directory config first, then let the current
    # working directory's config override it.
    _CFG.update(cfgdict2)
    _CFG.update(cfgdict1)
Example 25
Project: pyblish-win   Author: pyblish   File: turtle.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def __init__(self, type_, data=None):
        # Python 2 code (uses `basestring`).  type_ must be one of
        # "polygon", "image" or "compound"; anything else raises
        # TurtleGraphicsError.
        self._type = type_
        if type_ == "polygon":
            # Store polygons as immutable tuples of coordinates.
            if isinstance(data, list):
                data = tuple(data)
        elif type_ == "image":
            # A string naming an existing .gif file is loaded eagerly;
            # any other value is passed through untouched.
            if isinstance(data, basestring):
                if data.lower().endswith(".gif") and isfile(data):
                    data = TurtleScreen._image(data)
                # else data assumed to be Photoimage
        elif type_ == "compound":
            # Compound shapes start empty; components are added later.
            data = []
        else:
            raise TurtleGraphicsError("There is no shape type %s" % type_)
        self._data = data
Example 26
Project: aospy   Author: spencerahill   File: test_calc_basic.py    Apache License 2.0 5 votes vote down vote up
def _test_files_and_attrs(calc, dtype_out):
    """Assert that both the output file and the tar file for *calc*
    exist, then verify the output attributes."""
    for expected_path in (calc.path_out[dtype_out], calc.path_tar_out):
        assert isfile(expected_path)
    _test_output_attrs(calc, dtype_out)
Example 27
Project: aospy   Author: spencerahill   File: test_automate.py    Apache License 2.0 5 votes vote down vote up
def assert_calc_files_exist(calcs, write_to_tar, dtypes_out_time):
    """Check that expected calcs were written to files"""
    for calc in calcs:
        for dtype_out_time in dtypes_out_time:
            assert isfile(calc.path_out[dtype_out_time])
            # The tar file must exist exactly when tar output was requested.
            tar_written = isfile(calc.path_tar_out)
            assert tar_written if write_to_tar else not tar_written
Example 28
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() strips any directory components, confining lookups
        # to the package's 'shared' directory.
        filename = join(dirname(__file__), 'shared', basename(filename))
        if not isfile(filename):
            return Response('Not Found', status=404)
        mimetype = mimetypes.guess_type(filename)[0] \
            or 'application/octet-stream'
        with open(filename, 'rb') as f:
            return Response(f.read(), mimetype=mimetype)
Example 29
Project: Flask-Python-GAE-Login-Registration   Author: orymeyer   File: __init__.py    Apache License 2.0 5 votes vote down vote up
def get_resource(self, request, filename):
        """Return a static resource from the shared folder."""
        # basename() strips any directory components, confining lookups
        # to the package's 'shared' directory.
        filename = join(dirname(__file__), 'shared', basename(filename))
        if isfile(filename):
            # Fall back to a generic binary type when guessing fails.
            mimetype = mimetypes.guess_type(filename)[0] \
                or 'application/octet-stream'
            f = open(filename, 'rb')
            try:
                return Response(f.read(), mimetype=mimetype)
            finally:
                f.close()
        return Response('Not Found', status=404)
Example 30
Project: py-compose   Author: joaorafaelm   File: configuration.py    MIT License 5 votes vote down vote up
def exists(self, filename, extensions):
        """Find the first of *extensions* for which the config file exists.

        The extension is appended unless *filename* already ends with it.
        (The original used a substring test and mutated *filename* across
        iterations, so a matching extension elsewhere in the path — or an
        earlier candidate — produced wrong lookups.)  On success the
        resolved name and extension are stored on self and True returned.
        """
        for extension in extensions:
            candidate = filename
            if not candidate.endswith('.' + extension):
                candidate = '{}.{}'.format(candidate, extension)
            if isfile(candidate):
                self.filename = candidate
                self.extension = extension
                return True
        return False
Example 31
Project: DataHack2018   Author: InnovizTech   File: iou_evaluator.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def evaluate_folder(gt_folder, pred_folder, validmode=False):
    """Aggregate true-positive/false-negative/false-positive counts.

    Compares per-frame label files in *pred_folder* against the ground
    truth in *gt_folder*.  With validmode=True, frames lacking a
    prediction file are skipped; otherwise they are scored against
    all-zero labels.  Returns (agg_tp, agg_fn, agg_fp).
    """
    # get all ground truth files
    gt_indices = [data_utils.filename_to_frame(file)
                  for file in data_utils.find_sorted_files(gt_folder, 'labels')]
    print('evaluating folder {} with {} frames...'.format(osp.basename(gt_folder), len(gt_indices)))
    agg_tp = 0
    agg_fn = 0
    agg_fp = 0
    for frame_idx in gt_indices:
        pred_file_path = data_utils.frame_to_filename(pred_folder, frame_idx, 'labels')
        gt_labels = data_utils.read_data(gt_folder, frame_idx, 'labels')
        if not osp.isfile(pred_file_path):
            if validmode:
                continue
            else:
                print("No matching prediction file for frame {} in {}, filling zero labels".format(frame_idx, gt_folder))
                pred_labels = np.zeros_like(gt_labels)
        else:
            pred_labels = data_utils.read_data(pred_folder, frame_idx, 'labels')

        # Point counts must line up frame by frame.
        # NOTE(review): "predicion" typo in the message below is runtime
        # text and left untouched here.
        if len(gt_labels) != len(pred_labels):
            raise ValueError('GT point count ({}) does not match predicion point count ({}) in file: {}'
                             .format(len(gt_labels), len(pred_labels), frame_idx))

        tp, fn, fp = evaluate_frame(gt_labels, pred_labels)
        agg_tp += tp
        agg_fn += fn
        agg_fp += fp
    return agg_tp, agg_fn, agg_fp
Example 32
Project: rpm2swidtag   Author: swidtags   File: repodata.py    Apache License 2.0 5 votes vote down vote up
def __init__(self, repo):
		"""Parse repodata/repomd.xml of *repo* and locate the primary data href.

		Raises Error when repomd.xml is missing or lists no primary data.
		"""
		self.repo = repo
		self.href = "repodata/repomd.xml"
		self.path = path.join(self.repo.path, self.href)
		if not path.isfile(self.path):
			raise Error("file %s does not exist" % self.path) from None
		self.xml = etree.parse(self.path, etree.XMLParser(remove_blank_text = True))
		# Initialize before the search loop: without this, a repomd.xml
		# lacking a primary entry raised AttributeError on the test below
		# instead of the intended Error.
		self.primary_path = None
		for href in self.xml.xpath("/repo:repomd/repo:data[@type = 'primary']/repo:location/@href", namespaces = { 'repo': REPO_XMLNS }):
			self.primary_path = href
			break
		if not self.primary_path:
			raise Error("%s does not have primary data" % self.path) from None
		self.__primary = None
Example 33
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: runAllModels.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] files from chosen directory'
    caefilelist=[]
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f))]
    for files in onlyfiles:
        if files.endswith(filetype):
           caefilelist.append(files)
    return caefilelist
Example 34
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_no_cmt_no_tie.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] files from chosen directory'
    caefilelist = []
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for files in onlyfiles:
        if files.endswith(filetype):
            caefilelist.append(files)
    return caefilelist
Example 35
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_Ortho.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] files from chosen directory'
    caefilelist=[]
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f))]
    for files in onlyfiles:
        if files.endswith(filetype):
           caefilelist.append(files)
    return caefilelist
Example 36
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_no_cmt.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] files from chosen directory'
    caefilelist = []
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for files in onlyfiles:
        if files.endswith(filetype):
            caefilelist.append(files)
    return caefilelist
Example 37
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: postProcess.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] \
            files from chosen directory'
    caefilelist = []
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    for files in onlyfiles:
        if files.endswith(filetype):
            caefilelist.append(files)
    return caefilelist
Example 38
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput_Augmented_With_Yield.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* ending with *filetype* (Python 2)."""
    # Output goes to sys.__stdout__ -- presumably because the embedding
    # application replaces sys.stdout; TODO confirm.
    print >> sys.__stdout__, 'Loading list of [chosen file type] files from chosen directory'
    caefilelist=[]
    # Non-recursive: plain files only, subdirectories are skipped.
    onlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f))]
    for files in onlyfiles:
        if files.endswith(filetype):
           caefilelist.append(files)
    return caefilelist
Example 39
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_Yield.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Collect names of files under *mypath* ending with *filetype*.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: file-name suffix to match.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # Version-agnostic replacement for the Python-2-only `print >>` statement.
    sys.__stdout__.write(
        'Loading list of [chosen file type] files from chosen directory\n')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 40
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_WITHOUT_CEMENT.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return the names of files in *mypath* that end with *filetype*.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: required file-name suffix, e.g. '.inp'.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # print(...) is valid syntax in both Python 2 and Python 3, unlike the
    # original bare `print` statement.
    print('Loading list of [chosen file type] files from chosen directory')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 41
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_FOR_CEMENT.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """List file names in *mypath* that carry the *filetype* suffix.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: required file-name suffix, e.g. '.inp'.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # Parenthesized print works under both Python 2 and 3.
    print('Loading list of [chosen file type] files from chosen directory')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 42
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput_Augmented_With_Ortho.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* whose names end with *filetype*.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: file-name suffix to match.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # Direct write keeps the Python-2 `print >> sys.__stdout__` behavior while
    # remaining valid Python 3.
    sys.__stdout__.write(
        'Loading list of [chosen file type] files from chosen directory\n')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 43
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: setup_models_FOR_CEMENT_WITH_INTERFACE.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return the names of files in *mypath* ending with *filetype*.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: required file-name suffix, e.g. '.inp'.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # sys.__stdout__.write replaces the Python-2-only `print >>` statement.
    sys.__stdout__.write(
        'Loading list of [chosen file type] files from chosen directory\n')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 44
Project: python-scripts-for-scanip-abaqus   Author: mngad   File: caeToStiffnessOutput_DENSITY_CHANGE.py    GNU General Public License v3.0 5 votes vote down vote up
def getfiles(mypath, filetype):
    """Return names of files in *mypath* with the *filetype* suffix.

    Args:
        mypath: directory to scan (non-recursive).
        filetype: file-name suffix to match.

    Returns:
        Matching names (not full paths), in listdir() order.
    """
    # print(...) runs under both Python 2 and Python 3.
    print('Loading list of [chosen file type] files from chosen directory')
    return [f for f in listdir(mypath)
            if isfile(join(mypath, f)) and f.endswith(filetype)]
Example 45
Project: mmdetection   Author: open-mmlab   File: pascal_voc.py    Apache License 2.0 5 votes vote down vote up
def cvt_annotations(devkit_path, years, split, out_file):
    """Aggregate PASCAL VOC annotations for the given years/split and dump them.

    Args:
        devkit_path: Root of the VOCdevkit directory.
        years: A single year (e.g. 2007) or a list of years.
        split: Split name, e.g. 'train', 'val', 'trainval'.
        out_file: Path the aggregated annotation list is dumped to.

    Returns:
        The list of per-image annotation dicts, or None when a split file
        list was missing.
    """
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
        if not osp.isfile(filelist):
            print('filelist does not exist: {}, skip voc{} {}'.format(
                filelist, year, split))
            # NOTE(review): the message says "skip" but this `return` aborts
            # the whole conversion (nothing is dumped); `continue` may have
            # been intended -- confirm against upstream before changing.
            return
        img_names = mmcv.list_from_file(filelist)
        # XML annotation path for every image name in the split.
        xml_paths = [
            osp.join(devkit_path,
                     'VOC{}/Annotations/{}.xml'.format(year, img_name))
            for img_name in img_names
        ]
        img_paths = [
            'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)
            for img_name in img_names
        ]
        # parse_xml is mapped over (xml, img) pairs with a progress bar.
        part_annotations = mmcv.track_progress(parse_xml,
                                               list(zip(xml_paths, img_paths)))
        annotations.extend(part_annotations)
    mmcv.dump(annotations, out_file)
    return annotations
Example 46
Project: google_streetview   Author: rrwen   File: test_api_results.py    MIT License 5 votes vote down vote up
def tearDown(self):
    """Delete the temporary file and directory created for this test, if any."""
    for target, present, discard in (
            (self.tempfile, isfile, remove),
            (self.tempdir, isdir, rmtree)):
        if present(target):
            discard(target)
Example 47
Project: google_streetview   Author: rrwen   File: test_cli.py    MIT License 5 votes vote down vote up
def tearDown(self):
    """Remove per-test scratch artifacts: the temp file first, then the tree."""
    scratch_file = self.tempfile
    if isfile(scratch_file):
        remove(scratch_file)
    scratch_dir = self.tempdir
    if isdir(scratch_dir):
        rmtree(scratch_dir)
Example 48
Project: flasky   Author: RoseOu   File: __init__.py    MIT License 5 votes vote down vote up
def get_resource(self, request, filename):
        """Serve a file from this package's ``shared`` directory, or a 404 response."""
        # Resolve against the module-local 'shared' folder; basename() discards
        # any directory components in the client-supplied name.
        resource = join(dirname(__file__), 'shared', basename(filename))
        if not isfile(resource):
            return Response('Not Found', status=404)
        mimetype = mimetypes.guess_type(resource)[0] \
            or 'application/octet-stream'
        with open(resource, 'rb') as fh:
            return Response(fh.read(), mimetype=mimetype)
Example 49
Project: tvdbsimple   Author: phate89   File: setup.py    GNU General Public License v3.0 5 votes vote down vote up
def read(fname):
    """Return the UTF-8 text of *fname*, resolved relative to this file's
    directory; return '' when the file does not exist."""
    target = path.join(path.abspath(path.dirname(__file__)), fname)
    if not path.isfile(target):
        return ''
    # Typically used to pull the long description from a README file.
    with open(target, encoding='utf-8') as fh:
        return fh.read()
Example 50
Project: domain_gen   Author: rootVIII   File: utils.py    MIT License 5 votes vote down vote up
def add_data(path, new_data):
    """Append *new_data* to the domains.json under *path*, recreating the
    file from scratch when the existing one cannot be read."""
    path += 'domains.json'
    if not isfile(path):
        write(path, new_data)
        return
    try:
        existing = read(path)
    except Exception:
        # Unreadable/corrupt file: start over with just the new data.
        m = 'Corrupt domains.json.\n'
        m += 'Creating new domains.json'
        print(m)
        write(path, new_data)
    else:
        write(path, existing + new_data)
Example 51
Project: domain_gen   Author: rootVIII   File: utils.py    MIT License 5 votes vote down vote up
def check_paths(words, configs):
    """Exit with status 1 unless both input files exist.

    Args:
        words: path to the word-list JSON file.
        configs: path to the YAML configuration file.
    """
    if not isfile(words) or not isfile(configs):
        # Fixed typo in the user-facing message: 'conifg.yml' -> 'config.yml'.
        print('Missing config.yml or word_list.json')
        exit(1)
Example 52
Project: sinking   Author: Arteneko   File: config.py    Apache License 2.0 5 votes vote down vote up
def load_config(file_path):
    """
    Load and return the YAML-formatted configuration stored at the given path.
    :param file_path: the file path
    :raises FileNotFoundError: when no file exists at *file_path*
    :return: the loaded configuration
    """
    if path.isfile(file_path):
        with open(file_path, "r") as cfg_file:
            return yaml.load(cfg_file, Loader=yaml.SafeLoader)
    raise FileNotFoundError("Expected to find %s, but couldn't find it" % file_path)
Example 53
Project: MetrixReloaded   Author: Scrounger   File: MetrixReloadedCover.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def findCover(self, path):
		"""Find a cover image belonging to a movie file or disc directory.

		Returns a tuple (p1, fpath): the primary candidate base path and the
		first existing cover file path ("" when none was found).
		"""
		fpath = p1 = p2 = p3 = ""
		name, ext = os_path.splitext(path)
		ext = ext.lower()

		if os_path.isfile(path):
			dir = os_path.dirname(path)
			# Candidates: the movie's own base name, and '<dir>/<dirname>'.
			p1 = name
			p2 = os_path.join(dir, os_path.basename(dir))

		elif os_path.isdir(path):
			if path.lower().endswith("/bdmv"):
				# Blu-ray layout: strip '/bdmv' (and an optional '/brd' level).
				dir = path[:-5]
				if dir.lower().endswith("/brd"): dir = dir[:-4]
			elif path.lower().endswith("video_ts"):
				# DVD layout: strip 'video_ts' (and an optional '/dvd' level).
				# NOTE(review): unlike '/bdmv' this match has no leading '/' --
				# confirm whether that asymmetry is intentional.
				dir = path[:-9]
				if dir.lower().endswith("/dvd"): dir = dir[:-4]
			else:
				dir = path
				# Plain directory: also try '<dir>/folder.<ext>'.
				p2 = os_path.join(dir, "folder")

			prtdir, dirname = os_path.split(dir)
			p1 = os_path.join(dir, dirname)
			p3 = os_path.join(prtdir, dirname)

		# Try each candidate base with every known image extension; note this
		# deliberately reuses (shadows) `ext` and `path` as loop variables.
		pathes = (p1, p2, p3)
		for p in pathes:
			for ext in self.exts:
				path = p + ext
				if os_path.exists(path): break
			if os_path.exists(path):
				fpath = path
				break
		return (p1, fpath)
Example 54
Project: calmjs   Author: calmjs   File: test_artifact.py    GNU General Public License v2.0 5 votes vote down vote up
def test_prepare_base_parent_is_file(self):
        """prepare_export_location must abort when the would-be parent
        directory ('artifacts') already exists as a plain file."""
        basedir = utils.mkdtemp(self)
        export_target = join(basedir, 'artifacts', 'export.js')
        # Occupy the parent path with an empty regular file.
        open(join(basedir, 'artifacts'), 'w').close()

        with self.assertRaises(ToolchainAbort), \
                pretty_logging(stream=mocks.StringIO()) as log:
            self.assertFalse(prepare_export_location(export_target))

        self.assertIn("cannot export to '%s'" % export_target, log.getvalue())
        self.assertTrue(isfile(join(basedir, 'artifacts')))
Example 55
Project: mx   Author: graalvm   File: select_jdk.py    GNU General Public License v2.0 5 votes vote down vote up
def is_valid_jdk(jdk):
    """
    Determines if `jdk` looks like a valid JDK directory.

    :return: True if there's a ``java`` executable in ``jdk/bin``
    """
    launcher = join(jdk, 'bin', 'java')
    # Fall back to the Windows launcher name when the plain one is absent.
    launcher = launcher if exists(launcher) else launcher + '.exe'
    return isfile(launcher) and os.access(launcher, os.X_OK)
Example 56
Project: Generative-Latent-Optimization-Tensorflow   Author: clvrai   File: download.py    MIT License 5 votes vote down vote up
def check_file(data_dir):
    """Return True when *data_dir* already holds both 'data.hdf5' and
    'id.txt'; create the directory (and return False) when it is missing."""
    if not osp.exists(data_dir):
        os.mkdir(data_dir)
        return False
    required = (osp.join(data_dir, 'data.hdf5'),
                osp.join(data_dir, 'id.txt'))
    return all(osp.isfile(item) for item in required)
Example 57
Project: openhatch   Author: campbe13   File: vim2pygments.py    GNU Affero General Public License v3.0 5 votes vote down vote up
def main():
    """Command-line entry point: validate argv and convert the given .vim
    colour scheme to a Pygments style.

    Returns 2 on usage error, 1 when the input file is missing, and None
    on success or when only the version was requested.

    Python 2 code (bare print statements).
    """
    if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
        print 'Usage: %s <filename.vim>' % sys.argv[0]
        return 2
    if sys.argv[1] in ('-v', '--version'):
        print '%s %s' % (SCRIPT_NAME, SCRIPT_VERSION)
        return
    filename = sys.argv[1]
    # exists() + isfile() rejects directories as well as missing paths.
    if not (path.exists(filename) and path.isfile(filename)):
        print 'Error: %s not found' % filename
        return 1
    convert(filename, sys.stdout)
    sys.stdout.write('\n')
Example 58
Project: aridi   Author: dpgon   File: gathering0.py    GNU General Public License v3.0 5 votes vote down vote up
def checkcommand(command):
    """Locate *command*: first in the current directory, then on $PATH.

    Returns the command's full path, or None when it cannot be found.
    """
    if isfile(command):
        return os.getcwd() + "/" + command
    # Walk the PATH entries in order and return the first hit.
    for directory in os.environ["PATH"].split(':'):
        candidate = directory + "/" + command
        if isfile(candidate):
            return candidate
    return None
Example 59
Project: aridi   Author: dpgon   File: gathering0.py    GNU General Public License v3.0 5 votes vote down vote up
def canread(filename, uid, gids):
    """Classify read access to *filename* for the given uid/gids.

    Returns -1 when the file is missing, 3 when world-readable, 2 when
    group-readable by one of *gids*, 1 when owned by *uid*, else 0.
    """
    if not isfile(filename):
        return -1
    st = os.stat(filename)
    if st.st_mode & 0b000000100:        # other-read bit
        return 3
    if st.st_mode & 0b000100000 and st.st_gid in gids:   # group-read bit
        return 2
    return 1 if st.st_uid == uid else 0
Example 60
Project: aridi   Author: dpgon   File: gathering0.py    GNU General Public License v3.0 5 votes vote down vote up
def _examinefiles(self):
        """Record permission metadata for every interesting file.

        Fills ``self.files[path]`` with ``[description, readability, perm]``,
        where readability comes from ``self.canread``.
        """
        for item in self.interesting_files:
            if isfile(item[0]):
                # Last three octal digits of the mode, e.g. '644'.
                perm = str(oct(os.stat(item[0]).st_mode))[-3:]
            else:
                # NOTE(review): int 0 here vs the str above -- consumers must
                # handle both types; confirm this asymmetry is intended.
                perm = 0
            self.files[item[0]] = [item[1], self.canread(item[0], self.uid, self.gids), perm]
Example 61
Project: operator-courier   Author: operator-framework   File: test_verify.py    Apache License 2.0 5 votes vote down vote up
def test_verify_valid_nested_sources_with_output(source_dir):
    """Valid nested sources: `operator-courier verify` exits 0 and the
    written validation report contains no errors."""
    with TemporaryDirectory() as workdir:
        report_path = join(workdir, "output.json")
        verify_cmd = (f'operator-courier verify {source_dir} '
                      f'--validation-output {report_path}')
        child = subprocess.Popen(verify_cmd,
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        status = child.wait()
        assert isfile(report_path)
        with open(report_path, 'r') as fh:
            report = loads(fh.read())

    assert status == 0
    assert not report['errors']
Example 62
Project: operator-courier   Author: operator-framework   File: test_verify.py    Apache License 2.0 5 votes vote down vote up
def test_verify_invalid_nested_sources_with_output(source_dir):
    """Invalid nested sources: the verify command exits non-zero and the
    validation report records errors."""
    with TemporaryDirectory() as scratch:
        report_file = join(scratch, "output.json")
        command_line = (f'operator-courier verify {source_dir} '
                        f'--validation-output {report_file}')
        child = subprocess.Popen(command_line,
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        rc = child.wait()
        assert isfile(report_file)
        with open(report_file, 'r') as fh:
            verdict = loads(fh.read())

    assert rc != 0
    assert verdict['errors']
Example 63
Project: snn_global_pattern_induction   Author: chrhenning   File: seven_segment_data.py    Apache License 2.0 5 votes vote down vote up
def _read_data(filename):
    """Parse the seven-segment LED dataset file.

    Each line holds 8 comma-separated integers: seven LED-state bits
    followed by the ground-truth digit label.

    Args:
        filename: Path and name of the file that contains the samples.

    Returns:
        inputs: List of binary vectors (LED states while a digit is shown).
        labels: List of corresponding ground-truth labels.
    """
    assert(path.isfile(filename))

    logger.info('Reading samples from %s.' % filename)
    inputs = []
    labels = []
    with open(filename, "r") as f:
        for line in f:
            fields = line.strip().split(',')
            assert(len(fields) == 8)
            row = [int(v) for v in fields]
            inputs.append(row[:-1])
            labels.append(row[-1])

        logger.info('Number of samples in file: %d' % (len(inputs)))

    return inputs, labels
Example 64
Project: snn_global_pattern_induction   Author: chrhenning   File: mnist_data.py    Apache License 2.0 5 votes vote down vote up
def _read_labels(filename):
    """Read a set of labels from an MNIST-style byte file.

    Args:
        filename: Path and name of the byte file that contains the labels.

    Returns:
        A list of labels (one int per byte of payload).
    """
    assert(path.isfile(filename))

    logger.info('Reading labels from %s.' % filename)
    with open(filename, "rb") as f:
        # Skip the magic number, then read the declared label count.
        _ = f.read(4)
        num = int.from_bytes(f.read(4), byteorder='big')
        logger.info('Number of labels in current file: %d' % num)

        # Iterating a bytes object yields the same ints as unpacking each
        # byte with struct.unpack('B', ...), so read the payload in bulk.
        labels = list(f.read())
        assert(len(labels) == num)

        return labels
Example 65
Project: imgcomp-cvpr   Author: fab-jul   File: val_files.py    GNU General Public License v3.0 5 votes vote down vote up
def __init__(self, out_dir):
        """Bind to *out_dir* and locate its measures file, failing fast when
        the file is absent."""
        self.out_dir = out_dir
        measures_path = path.join(out_dir, _MEASURES_FILE_NAME)
        self.p = measures_path
        if path.isfile(measures_path):
            return
        raise FileNotFoundError('No {} for {}'.format(_MEASURES_FILE_NAME, out_dir))
Example 66
Project: sanic   Author: huge-success   File: release.py    MIT License 5 votes vote down vote up
def _fetch_current_version(config_file: str) -> str:
    """Return the tracked current_version from *config_file*, falling back
    to the default calendar-based release version."""
    if not path.isfile(config_file):
        return _fetch_default_calendar_release_version()
    parser = RawConfigParser()
    with open(config_file) as cfg:
        parser.read_file(cfg)
        # An empty stored value also falls through to the default.
        return (
            parser.get("version", "current_version")
            or _fetch_default_calendar_release_version()
        )
Example 67
Project: password_pwncheck   Author: CboeSecurity   File: pwned-password-server.py    MIT License 5 votes vote down vote up
def __init__(self, filepaths):
        """Index one or more password files for searching.

        Args:
            filepaths: either a directory path (every regular file directly
                inside it is indexed) or a list of file paths.
        """
        self.search_files = {}
        # isinstance replaces the fragile `type(x) == type("")` comparison
        # (which rejects str subclasses and is non-idiomatic).
        if isinstance(filepaths, str):
            filepaths = [join(filepaths, f) for f in listdir(filepaths) if isfile(join(filepaths, f))]
        if isinstance(filepaths, list):
            for filepath in filepaths:
                self.search_files[filepath] = SearchFile(filepath)
Example 68
Project: password_pwncheck   Author: CboeSecurity   File: pwnpass.py    MIT License 5 votes vote down vote up
def __init__(self, filepaths, debug=False):
        """Index one or more password files for searching.

        Args:
            filepaths: either a directory path (every regular file directly
                inside it is indexed) or a list of file paths.
            debug: when True, print the list of files that will be searched.
        """
        self.debug = debug
        self.search_files = {}
        # isinstance replaces the fragile `type(x) == type("")` comparison.
        if isinstance(filepaths, str):
            filepaths = [join(filepaths, f) for f in listdir(filepaths) if isfile(join(filepaths, f))]
        if isinstance(filepaths, list):
            for filepath in filepaths:
                self.search_files[filepath] = SearchFile(filepath,debug)
        if self.debug:
            print(" * Will Search the following:\n    %s"%("\n    ").join(filepaths))
Example 69
Project: bitpay-brick   Author: javgh   File: config.py    MIT License 5 votes vote down vote up
def read_api_key():
    """Return the saved BitPay API key (whitespace-stripped), or None when
    no key file exists in the user's home directory."""
    key_path = expanduser('~') + '/' + API_KEY_FILE
    if not isfile(key_path):
        return None
    with open(key_path, 'r') as key_file:
        return key_file.read().strip()
Example 70
Project: bitpay-brick   Author: javgh   File: test_bitpayprovider.py    MIT License 5 votes vote down vote up
def read_test_api_key():
    """Fetch the testnet BitPay API key from the user's home directory,
    returning None when the key file is absent."""
    key_path = expanduser('~') + '/' + TEST_API_KEY_FILE
    if isfile(key_path):
        with open(key_path, 'r') as fh:
            return fh.read().strip()
    return None
Example 71
Project: razzy-spinner   Author: rafasashi   File: crubadan.py    GNU General Public License v3.0 5 votes vote down vote up
def _load_lang_ngrams(self, lang):
        ''' Load single n-gram language file given the ISO 639-3 language code
            and return its FreqDist.

            Raises RuntimeError when the language is unsupported or its
            n-gram file is missing. '''

        if lang not in self.langs():
            raise RuntimeError("Unsupported language.")

        crubadan_code = self.iso_to_crubadan(lang)
        ngram_file = path.join(self.root, crubadan_code + '-3grams.txt')

        if not path.isfile(ngram_file):
            # Bug fix: the original raised the undefined name `Runtime`,
            # which surfaced as a NameError instead of the intended error.
            raise RuntimeError("No N-gram file found for requested language.")

        counts = FreqDist()
        if PY3:
            f = open(ngram_file, 'r', encoding='utf-8')
        else:
            f = open(ngram_file, 'rU')
        try:
            for line in f:
                if PY3:
                    data = line.split(' ')
                else:
                    data = line.decode('utf8').split(' ')

                # Each line is '<count> <ngram>'.
                ngram = data[1].strip('\n')
                freq = int(data[0])

                counts[ngram] = freq
        finally:
            f.close()  # the original leaked the file handle

        return counts
Example 72
Project: domain_discovery_API   Author: VIDA-NYU   File: runSeedFinder.py    GNU General Public License v3.0 5 votes vote down vote up
def collect_seed_urls(self, updateStatusCB, shouldTerminateCB, p):
        """Block until ``self.csv_file`` appears, polling every 5 seconds.

        Python 2 code (print statements, ``except IOError, message`` syntax).
        """
        print "\n\n\n COLLECT SEED URLS ",self.query," ", self.csv_file

        curr_subquery = None
        urls = []
        # Wait for the self.csv_file to be created
        while True:
            try:
                if isfile(self.csv_file):
                    break
                sleep(5)
            except IOError, message:
                # NOTE(review): the '%s' placeholder is never interpolated --
                # print receives two comma-separated values; likely meant
                # `print "..." % message`. Confirm before changing.
                print "File is locked (unable to open in append mode). %s.", message
Example 73
Project: domain_discovery_API   Author: VIDA-NYU   File: runSeedFinder.py    GNU General Public License v3.0 5 votes vote down vote up
def execSeedFinder(self, terms, data_path, updateStatusCB, shouldTerminateCB, es_info):
        """Launch ACHE's seedFinder for *terms* and collect the seed URLs it
        produces.

        Python 2 code (print statement, ``urllib.quote``). Requires the
        ACHE_HOME environment variable to point at an ACHE installation.
        """
        print "\n\n\n EXEC SEED FINDER", terms, " ", data_path, " \n\n\n"
        domain_name = es_info['activeDomainIndex']

        data_dir = data_path + "/data/"
        data_crawler  = data_dir + domain_name
        data_training = data_crawler + "/training_data/"

        crawlermodel_dir = data_crawler + "/models/"

        # A page-classifier model must exist before seed finding can run.
        if (not isdir(crawlermodel_dir)):
            return

        seed_dir = data_crawler + "/seedFinder/"

        if (not isdir(seed_dir)):
            # Create dir if it does not exist
            makedirs(seed_dir)

        if (not isdir(seed_dir+"log")):
            makedirs(seed_dir+"log")

        csv_file = seed_dir + terms.replace(" ","_") + "_results.csv"

        # Start from a clean results file for this query.
        if isfile(csv_file):
            remove(csv_file)

        ache_home = environ['ACHE_HOME']

        comm = ache_home + "/bin/ache seedFinder --csvPath " + csv_file + " --initialQuery \"" +terms + "\" --modelPath " + crawlermodel_dir + " --seedsPath " + seed_dir + " --maxPages 2 --maxQueries 25"

        encoded_query = urllib.quote(terms).replace("%5C","")

        # Per-query log files named after the URL-encoded query terms.
        f_sf_log = open(seed_dir+"log/seeds_"+"+".join(encoded_query.split("%20"))+".log", 'w')
        f_sf_err_log = open(seed_dir+"log/seeds_"+"+".join(encoded_query.split("%20"))+"_error.log", 'w')

        if not shouldTerminateCB(terms):
            p = Popen(comm, shell=True, stderr=f_sf_err_log, stdout=f_sf_log)
            updateStatusCB(terms, "terminate_process", p)
            # Upload seeds generated by the SeedFinder
            CollectSeeds(terms, csv_file, es_info).collect_seed_urls(updateStatusCB, shouldTerminateCB, p)
Example 74
Project: pyblish-win   Author: pyblish   File: file_util.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def move_file (src, dst, verbose=1, dry_run=0):
    """Move a file 'src' to 'dst'.

    If 'dst' is a directory, the file will be moved into it with the same
    name; otherwise, 'src' is just renamed to 'dst'.  Return the new
    full name of the file.

    Handles cross-device moves on Unix using 'copy_file()'.  What about
    other systems???

    Python 2 distutils code (``except os.error, (num, msg)`` syntax).
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno

    if verbose >= 1:
        log.info("moving %s -> %s", src, dst)

    # Dry run: report the destination without touching the filesystem.
    if dry_run:
        return dst

    if not isfile(src):
        raise DistutilsFileError("can't move '%s': not a regular file" % src)

    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' already exists" %
              (src, dst))

    if not isdir(dirname(dst)):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' not a valid path" % \
              (src, dst))

    copy_it = 0
    try:
        os.rename(src, dst)
    except os.error, (num, msg):
        # EXDEV: source and destination are on different devices, so
        # rename() cannot work -- flag a copy-based fallback instead.
        if num == errno.EXDEV:
            copy_it = 1
        else:
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s': %s" % (src, dst, msg))
Example 75
Project: symbolator   Author: kevinpt   File: symbolator_sphinx.py    MIT License 4 votes vote down vote up
def render_symbol(self, code, options, format, prefix='symbol'):
    # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
    """Render symbolator code into a PNG or SVG output file."""

    symbolator_cmd = options.get('symbolator_cmd', self.builder.config.symbolator_cmd)
    # Hash covers source, options and command so any change re-renders.
    hashkey = (code + str(options) + str(symbolator_cmd) +
               str(self.builder.config.symbolator_cmd_args)).encode('utf-8')

    # Use name option if present otherwise fallback onto SHA-1 hash
    name = options.get('name', sha1(hashkey).hexdigest())
    fname = '%s-%s.%s' % (prefix, name, format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)

    # Cache hit: an image for this exact input already exists.
    if path.isfile(outfn):
        return relfn, outfn

    # A previous failure to launch this command was recorded; don't retry.
    if (hasattr(self.builder, '_symbolator_warned_cmd') and
       self.builder._symbolator_warned_cmd.get(symbolator_cmd)):
        return None, None

    ensuredir(path.dirname(outfn))

    # Symbolator expects UTF-8 by default
    if isinstance(code, text_type):
        code = code.encode('utf-8')

    cmd_args = [symbolator_cmd]
    cmd_args.extend(self.builder.config.symbolator_cmd_args)
    # '-i -' makes symbolator read the diagram source from stdin.
    cmd_args.extend(['-i', '-', '-f', format, '-o', outfn])
    
    try:
        p = Popen(cmd_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError as err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        logger.warning('symbolator command %r cannot be run (needed for symbolator '
                       'output), check the symbolator_cmd setting', symbolator_cmd)
        if not hasattr(self.builder, '_symbolator_warned_cmd'):
            self.builder._symbolator_warned_cmd = {}
        self.builder._symbolator_warned_cmd[symbolator_cmd] = True
        return None, None
    try:
        # Symbolator may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code)
    except (OSError, IOError) as err:
        if err.errno not in (EPIPE, EINVAL):
            raise
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise SymbolatorError('symbolator exited with error:\n[stderr]\n%s\n'
                            '[stdout]\n%s' % (stderr, stdout))
    if not path.isfile(outfn):
        raise SymbolatorError('symbolator did not produce an output file:\n[stderr]\n%s\n'
                            '[stdout]\n%s' % (stderr, stdout))
    return relfn, outfn
Example 76
Project: snn_global_pattern_induction   Author: chrhenning   File: mnist_data.py    Apache License 2.0 4 votes vote down vote up
def __init__(self):
        """Read the MNIST digit classification dataset from file.

        This method checks whether the dataset has been read before (a pickle
        dump has been generated). If so, it reads the dump. Otherwise, it
        reads the data from scratch and creates a dump for future usage.

        Args:

        Returns:
        """
        super().__init__()

        start = time.time()

        logger.info('Reading MNIST dataset ...')

        # If data has been processed before.
        # NOTE(review): unpickling trusts the cache file at
        # config.mnist_pickle_dump; delete/regenerate it if it could have
        # been tampered with.
        if path.isfile(config.mnist_pickle_dump):
            with open(config.mnist_pickle_dump, 'rb') as f:
                self._data = pickle.load(f)

        else:
            # read labels
            train_labels = MNISTData._read_labels(config.mnist_train_label)
            test_labels = MNISTData._read_labels(config.mnist_test_label)

            # read images
            train_inputs = MNISTData._read_images(config.mnist_train_images)
            test_inputs = MNISTData._read_images(config.mnist_test_images)

            # Bring these raw readings into the internal structure of the
            # Dataset class
            assert(len(train_labels) == len(train_inputs))
            assert(len(test_labels) == len(test_inputs))

            # Generate a list of training samples
            for i, raw_img in enumerate(train_inputs):
                label = train_labels[i]
                sample = MNISTData._generate_sample(raw_img, label)
                self.samples.append(sample)
                self.train.append(sample)

            # Generate a list of test samples
            for i, raw_img in enumerate(test_inputs):
                label = test_labels[i]
                sample = MNISTData._generate_sample(raw_img, label)
                self.samples.append(sample)
                self.test.append(sample)

            # Compute input and output number of neurons.
            self._determine_in_out_size()

            # Save read dataset to allow faster reading in future.
            with open(config.mnist_pickle_dump, 'wb') as f:
                pickle.dump(self._data, f)

        end = time.time()
        logger.info('Elapsed time to read dataset: %f sec' % (end-start))
Example 77
Project: snn_global_pattern_induction   Author: chrhenning   File: mnist_data.py    Apache License 2.0 4 votes vote down vote up
def _read_images(filename):
        """Reading a set of images from a file.

        Args:
            filename: Path and name of the byte file that contains the images.

        Returns:
            A list of images. Each image will be a 2D numpy array of type uint8.
        """
        assert(path.isfile(filename))

        logger.info('Reading images from %s.' % filename)
        with open(filename, 'rb') as f:
            # Skip magic number
            _ = f.read(4)
            # Get number of images in this file.
            num = int.from_bytes(f.read(4), byteorder='big')
            logger.info('Number of images in current file: %d' % num)
            # Get number of rows and columns.
            rows = int.from_bytes(f.read(4), byteorder='big')
            cols = int.from_bytes(f.read(4), byteorder='big')

            images = []
            curr_img = None

            # Stream the pixel payload one byte at a time; every rows*cols
            # bytes start a fresh image buffer.
            i = 0
            byte = f.read(1)
            while byte:
                # If the current byte marks the beginning of a new image.
                if i % (rows*cols) == 0:
                    curr_img = np.zeros((rows,cols), dtype=np.uint8)
                    images.append(curr_img)

                intensity = struct.unpack('B', byte)[0]
                # Compute row and column offset of current pixel.
                s = i % (rows*cols)
                r = s // cols
                c = s % cols
                curr_img[r][c] = intensity

                i += 1
                byte = f.read(1)

            assert(len(images) == num)

            return images
Example 78
Project: Repobot   Author: Desgard   File: display.py    MIT License 4 votes vote down vote up
def _get_display_formatter(self,
                               dirname_output_format,
                               fname_output_format,
                               fp_format,
                               fp_cleaner=None):
        """ generate built-in formatter function

           this is used to define both the notebook and terminal built-in
            formatters as they only differ by some wrapper text for each entry

           dirname_output_format: string to use for formatting directory
            names, dirname will be substituted for a single "%s" which
            must appear in this string
           fname_output_format: string to use for formatting file names,
            if a single "%s" appears in the string, fname will be substituted
            if two "%s" appear in the string, the path to fname will be
             substituted for the first and fname will be substituted for the
             second
           fp_format: string to use for formatting filepaths, must contain
            exactly two "%s" and the dirname will be subsituted for the first
            and fname will be substituted for the second
        """
        def f(dirname, fnames, included_suffixes=None):
            # Format one directory listing; returns [] when nothing matches.
            result = []
            # begin by figuring out which filenames, if any,
            # are going to be displayed
            display_fnames = []
            for fname in fnames:
                if (isfile(join(dirname,fname)) and
                       (included_suffixes is None or
                        splitext(fname)[1] in included_suffixes)):
                      display_fnames.append(fname)

            if len(display_fnames) == 0:
                # if there are no filenames to display, don't print anything
                # (not even the directory name)
                pass
            else:
                # otherwise print the formatted directory name followed by
                # the formatted filenames
                dirname_output_line = dirname_output_format % dirname
                result.append(dirname_output_line)
                for fname in display_fnames:
                    fp = fp_format % (dirname,fname)
                    if fp_cleaner is not None:
                        fp = fp_cleaner(fp)
                    try:
                        # output can include both a filepath and a filename...
                        fname_output_line = fname_output_format % (fp, fname)
                    except TypeError:
                        # ... or just a single filepath
                        fname_output_line = fname_output_format % fname
                    result.append(fname_output_line)
            return result
        return f
Example 79
Project: domain_discovery_API   Author: VIDA-NYU   File: domain_discovery_model.py    GNU General Public License v3.0 4 votes vote down vote up
def runSeedFinder(self, terms, session):
    """ Execute the SeedFinder with the specified terms, as a task
    submitted to ``self.pool``. The details of the url results of the
    SeedFinder are uploaded into elasticsearch by that task.

    Parameters:
        terms (str): terms for the initial query

        session (json): Should contain the domainId

    Returns:
        str: "Starting" when a new run was launched, "Terminated" if a
        terminate request arrived before the task could be submitted, or
        the current status string when a run for these terms is already
        in progress.
    """
    domainId = session['domainId']
    es_info = self._esInfo(domainId);


    # If a SeedFinder run for these terms is already registered and has
    # not been asked to stop, don't start another -- report its status.
    if self.runningSeedFinders.get(terms) is not None:
      if not self.runningSeedFinders[terms]['shouldTerminate']:
        return self.runningSeedFinders[terms]['status']

    data_dir = self._path + "/data/"
    data_domain  = data_dir + es_info['activeDomainIndex']

    domainmodel_dir = data_domain + "/models/"

    # Register the run before doing any slow work (model creation below)
    # so concurrent callers with the same terms return early above.
    self.runningSeedFinders[terms] = {"domain": self._domains[domainId]['domain_name'], "status": "Starting", "description":"Query: "+terms, 'shouldTerminate': False}

    # The SeedFinder needs a page classifier model for this domain;
    # build one first if it does not exist yet.
    if (not isfile(domainmodel_dir+"pageclassifier.model")):
      self.runningSeedFinders[terms]["status"] = "Creating Model"
      self._crawlerModel.createModel(session, zip=False)

    print "\n\n\n RUN SEED FINDER",terms,"\n\n\n"

    self.runningSeedFinders[terms]["status"] = "Starting"

    # A terminate request may have arrived while the model was being
    # created; only submit the SeedFinder task if we are still wanted.
    if not self.runningSeedFinders[terms]['shouldTerminate']:
      p = self.pool.submit(self.seedfinder.execSeedFinder, terms, self._path, self._updateSeedFinderStatus, self._seedfinderShouldTerminate, es_info)
      self.runningSeedFinders[terms]["process"] = p
      # Callback cleans up / records completion when the task finishes.
      p.add_done_callback(self._seedfinderCompleted)
    else:
      del self.runningSeedFinders[terms]
      return "Terminated"

    return "Starting"
Example 80
Project: domain_discovery_API   Author: VIDA-NYU   File: crawler_model.py    GNU General Public License v3.0 4 votes vote down vote up
def _createResultModelZip(self, session):

        """ Create a zip archive of the classified seed data for a domain.

        The archive is written under the client's build directory
        (server/data/ is remapped to client/build/models/, since only the
        client site is exposed over HTTP) and bundles whichever of the
        relevant/irrelevant/unsure seed files exist for the domain.

        Parameters:
        session (json): should have domainId

        Returns:
        str: URL path of the zip, "models/<domain>_results_model.zip"
        """


        path = self._path

        es_info = self._esInfo(session["domainId"])

        data_dir = path + "/data/"

        #print data_dir
        #print es_info['activeDomainIndex']

        data_domain  = data_dir + es_info['activeDomainIndex']
        domainmodel_dir = data_domain + "/models/"

        zip_dir = data_dir
        # Create the zip in the client (client/build/models/); just the client site is being exposed
        saveClientSite = zip_dir.replace('server/data/','client/build/models/')
        if (not isdir(saveClientSite)):
            makedirs(saveClientSite)
        zip_filename = saveClientSite + es_info['activeDomainIndex'] + "_results_model.zip"

        # Each seed file is optional; add whichever exist. The chmod after
        # each write makes the archive world-readable for the web server.
        with ZipFile(zip_filename, "w") as modelzip:
            if (isfile(data_domain +"/relevantseeds.txt")):
                print "zipping file: "+data_domain +"/relevantseeds.txt"
                modelzip.write(data_domain +"/relevantseeds.txt", es_info['activeDomainIndex'] + "_relevant_seeds.txt")
                chmod(zip_filename, 0o777)
            if (isfile(data_domain +"/irrelevantseeds.txt")):
                print "zipping file: "+data_domain +"/irrelevantseeds.txt"
                modelzip.write(data_domain +"/irrelevantseeds.txt", es_info['activeDomainIndex'] + "_irrelevant_seeds.txt")
                chmod(zip_filename, 0o777)
            if (isfile(data_domain +"/unsureseeds.txt")):
                print "zipping file: "+data_domain +"/unsureseeds.txt"
                modelzip.write(data_domain +"/unsureseeds.txt", es_info['activeDomainIndex'] + "_unsure_seeds.txt")
                chmod(zip_filename, 0o777)

        # Return the client-relative URL, not the filesystem path.
        return "models/" + es_info['activeDomainIndex'] + "_results_model.zip"

#######################################################################################################