Python tempfile._get_candidate_names() Examples

The following code examples show how to use tempfile._get_candidate_names(). They are taken from open source Python projects. You can vote up the examples you find helpful or vote down the ones you don't.

Example 1
Project: gym-malware   Author: endgameinc   File: manipulate2.py    MIT License 6 votes vote down vote up
def upx_unpack(self, seed=None):
        """Attempt to UPX-unpack ``self.bytez``; on success, replace it.

        Writes the payload to a temporary file, shells out to ``upx -d``,
        and reads back the unpacked binary if UPX succeeded.

        Args:
            seed: unused; kept for signature symmetry with other mutators.

        Returns:
            ``self.bytez`` -- the unpacked bytes on success, otherwise the
            original bytes unchanged.
        """
        # dump bytez to a temporary file
        # NOTE(review): relies on private tempfile helpers; the name is only
        # reserved, not created, so a collision/race is theoretically possible.
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))

        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)

        # Run UPX quietly; stdout/stderr are discarded via /dev/null.
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx', tmpfilename, '-d', '-o', tmpfilename + '_unpacked'], stdout=DEVNULL, stderr=DEVNULL)

        # The packed input is no longer needed either way.
        os.unlink(tmpfilename)

        if retcode == 0:  # successfully unpacked
            with open(tmpfilename + '_unpacked', 'rb') as result:
                self.bytez = result.read()

            os.unlink(tmpfilename + '_unpacked')

        return self.bytez 
Example 2
Project: iMIMIC-RCVs   Author: medgift   File: utils.py    MIT License 6 votes vote down vote up
def apply_modifications(model, custom_objects=None):
    """Rebuild a Keras model graph after in-place layer edits.

    Simply assigning ``model.layers[idx].activation = new_activation`` does
    not update the underlying graph; the inbound/outbound tensors of every
    layer must be rebuilt. The simplest reliable way to do that is to save
    the model to disk and load it back.

    Args:
        model: The `keras.models.Model` instance.
        custom_objects: Optional dict passed through to ``load_model``.

    Returns:
        A freshly loaded model with the changes applied; the original
        ``model`` is not mutated.
    """
    # Round-trip through a uniquely named HDF5 file in the temp directory.
    # NOTE(review): uses a private tempfile helper to reserve a name only.
    stem = next(tempfile._get_candidate_names())
    model_path = os.path.join(tempfile.gettempdir(), stem + '.h5')
    try:
        model.save(model_path)
        return load_model(model_path, custom_objects=custom_objects)
    finally:
        # Always clean up the temp file, even if save/load raised.
        os.remove(model_path)
Example 3
Project: sregistry-cli   Author: singularityhub   File: fileio.py    Mozilla Public License 2.0 6 votes vote down vote up
def get_tmpdir(requested_tmpdir=None, prefix="", create=True):
    """Get a temporary directory for an operation.

    The base directory is the caller's request when given, otherwise
    SREGISTRY_TMPDIR. A uniquely named subfolder is derived inside it.

    Parameters
    ==========
    requested_tmpdir: an optional requested temporary directory; takes
    first priority as it comes from the calling function.
    prefix: prefix for the subfolder created *within* the base directory
    (defaults to "sregistry-tmp" when empty).
    create: boolean to determine if we should create the folder (True)
    """
    from sregistry.defaults import SREGISTRY_TMPDIR

    # Caller-requested base wins over the configured default.
    base = requested_tmpdir if requested_tmpdir else SREGISTRY_TMPDIR

    label = prefix if prefix else "sregistry-tmp"
    unique = next(tempfile._get_candidate_names())
    tmpdir = os.path.join(base, "%s.%s" % (label, unique))

    if create is True and not os.path.exists(tmpdir):
        os.mkdir(tmpdir)

    return tmpdir
Example 4
Project: ALCD   Author: CNES   File: layers_creation.py    GNU General Public License v3.0 6 votes vote down vote up
def create_no_data_shp(global_parameters, force=False):
    '''
    Automatically create polygons over the no_data pixels
    in both the clear and the cloudy date.

    global_parameters: nested dict; only user_choices/main_dir is read here.
    force: unused in this function (kept for interface consistency).
    Writes the final shapefile to <main_dir>/In_data/Masks/no_data.shp.
    '''
    main_dir = global_parameters["user_choices"]["main_dir"]

    # Unique token so concurrent runs do not clobber each other's temp files.
    tmp_name = next(tempfile._get_candidate_names())
    tmp_tif = op.join('tmp', 'no_data_mask_{}.tif'.format(tmp_name))

    # Create the temporary no data TIF
    L1C_band_composition.create_no_data_tif(global_parameters, tmp_tif)

    # polygonize the raster
    print("  Polygonization")
    tmp_shp = op.join('tmp', 'no_data_mask_{}.shp'.format(tmp_name))
    out_layer = 'no_data_shape'
    # NOTE(review): shell=True with interpolated paths; paths here are
    # internally generated, but this would be unsafe with untrusted input.
    command = 'gdal_polygonize.py {} -mask {} -f "ESRI Shapefile" {} {} class'.format(
        tmp_tif, tmp_tif, tmp_shp, out_layer)
    subprocess.call(command, shell=True)

    # simplify the polygons
    out_shp = op.join(main_dir, 'In_data', 'Masks', 'no_data.shp')
    simplify_geometry(tmp_shp, out_shp, tolerance=100)
    return 
Example 5
Project: ALCD   Author: CNES   File: L1C_band_composition.py    GNU General Public License v3.0 6 votes vote down vote up
def create_variation_coeff(in_tif, in_channel, out_tif, radius=3, resolution=60):
    '''
    Create a texture variation coeff feature.

    in_tif: input raster path.
    in_channel: 1-based band index to analyze.
    out_tif: output raster path written by the OTB pipeline.
    radius: window radius for the local statistics.
    resolution: target pixel size used when resizing the band.
    '''
    # Resize the band to the working resolution into a uniquely named temp TIF.
    tmp_name = next(tempfile._get_candidate_names())
    temp_tif = op.join('tmp', 'band_for_contours_density_{}.tif'.format(tmp_name))
    resize_band(in_tif, out_band=temp_tif, pixelresX=resolution, pixelresY=resolution)

    # Mean and others moments of the contours
    LocalStatisticExtraction = otbApplication.Registry.CreateApplication("LocalStatisticExtraction")
    LocalStatisticExtraction.SetParameterString("in", str(temp_tif))
    LocalStatisticExtraction.SetParameterInt("channel", int(in_channel))
    LocalStatisticExtraction.SetParameterInt("radius", radius)
    LocalStatisticExtraction.UpdateParameters()
    # Execute() keeps the result in memory for in-pipeline chaining.
    LocalStatisticExtraction.Execute()

    # Variation coeff is the variance over the mean
    # (im1b2 holds the variance, im1b1 the mean, per the expression below).
    MeanOnly = otbApplication.Registry.CreateApplication("BandMathX")
    MeanOnly.SetParameterString("out", str(out_tif))
    MeanOnly.AddImageToParameterInputImageList(
        "il", LocalStatisticExtraction.GetParameterOutputImage("out"))
    MeanOnly.SetParameterString("exp", "sqrt(im1b2)/im1b1")
    MeanOnly.UpdateParameters()
    MeanOnly.ExecuteAndWriteOutput() 
Example 6
Project: ListOfPublicationsFromInspireHEP   Author: wiso   File: check_biblio.py    GNU General Public License v3.0 6 votes vote down vote up
def modify_item(item):
    # Open *item* in the user's $EDITOR and return the edited text.
    # NOTE: Python 2 code (print statement, raw_input).
    editor_command = os.environ.get("EDITOR")
    if not editor_command:
        print "you haven't defined a default EDITOR, (e.g. export EDITOR=emacs)"
        editor_command = raw_input("enter the command to open an editor (e.g. emacs/atom -w/...): ")
        os.environ['EDITOR'] = editor_command
    # Split so that commands with flags (e.g. "atom -w") become argv lists.
    editor_command = editor_command.strip().split()

    # Name has no directory component, so the file lands in the current
    # working directory -- NOTE(review): confirm that is intended.
    tmp_filename = next(tempfile._get_candidate_names())

    with open(tmp_filename, 'w') as f:
        f.write(item)
    # Blocks until the editor process exits.
    subprocess.call(editor_command + [tmp_filename])
    with open(tmp_filename) as f:
        new_item = f.read()
    os.remove(tmp_filename)
    return new_item 
Example 7
Project: RLs   Author: StepNeverStop   File: test_barracuda_converter.py    Apache License 2.0 6 votes vote down vote up
def test_barracuda_converter():
    """Convert BasicLearning.pb to Barracuda .nn format and check the output."""
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    # Reserve a unique .nn path in the temp dir (file is not created here).
    tmpfile = os.path.join(
        tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()) + ".nn"
    )

    # make sure there are no left-over files
    if os.path.isfile(tmpfile):
        os.remove(tmpfile)

    tf2bc.convert(path_prefix + "/BasicLearning.pb", tmpfile)

    # test if file exists after conversion
    assert os.path.isfile(tmpfile)
    # currently converter produces small output file even if input file is empty
    # 100 bytes is high enough to prove that conversion was successful
    assert os.path.getsize(tmpfile) > 100

    # cleanup
    os.remove(tmpfile) 
Example 8
Project: singularity-cli   Author: singularityhub   File: base.py    Mozilla Public License 2.0 6 votes vote down vote up
def _get_conversion_outfile(self):
        """a helper function to return a conversion temporary output file
           based on kind of conversion

           Parameters
           ==========
           convert_to: a string either docker or singularity, if a different

        """
        prefix = "spythonRecipe"
        if hasattr(self, "name"):
            prefix = self.name
        suffix = next(tempfile._get_candidate_names())
        return "%s.%s" % (prefix, suffix)

    # Printing 
Example 9
Project: hltex   Author: agajews   File: pybox.py    MIT License 6 votes vote down vote up
def fetch_generated_files(self):
        """Download files produced inside the sandbox into a fresh temp dir
        and return the paths of everything except main.py."""
        import hlbox
        import tempfile

        # Create a uniquely named working directory under the temp root.
        tmp_dir = os.path.join(
            tempfile._get_default_tempdir(),  # pylint: disable=protected-access
            "hltex_python_"
            + next(tempfile._get_candidate_names()),  # pylint: disable=protected-access
        )
        os.mkdir(tmp_dir)
        hlbox.runline(self.sandbox, "None\n")  # trigger tar
        hlbox.download(self.sandbox, tmp_dir)

        # Collect regular files only; main.py is the script itself, not output.
        generated_files = []
        for f in os.listdir(tmp_dir):
            if os.path.isfile(os.path.join(tmp_dir, f)) and f != "main.py":
                generated_files.append(os.path.join(tmp_dir, f))
        return generated_files 
Example 10
Project: RENAT   Author: bachng2017   File: Fic.py    Apache License 2.0 6 votes vote down vote up
def get_element_image(self,element=u'//body',filename=None):
        """ Get an opencv image object of the element and save it to file

        element: XPath locator of the target element (defaults to the body).
        filename: optional name of the output image inside the result folder.

        Returns a numpy array (the cropped image) and the temporary
        screenshot filename.
        """
        result_path = Common.get_result_path()
        # Reuse result_path instead of calling Common.get_result_path() twice.
        tmp_file = '%s/screen_%s.png' % (result_path,next(tempfile._get_candidate_names()))
        self._selenium.capture_page_screenshot(tmp_file)
        _element = self._selenium.get_webelement(element)
        pos = _element.location
        size = _element.size
        screen = cv2.imread(tmp_file)
        # Crop the full-page screenshot down to the element's bounding box.
        img = screen[int(pos['y']):int(pos['y']+size['height']),int(pos['x']):int(pos['x']+size['width'])]

        if filename:
            cv2.imwrite('%s/%s' % (result_path,filename),img)
        BuiltIn().log('Save image of element to file `%s`' % filename)
        return img,tmp_file 
Example 11
Project: mutateEXE   Author: jymcheong   File: manipulate2.py    MIT License 6 votes vote down vote up
def upx_unpack(self, seed=None):
        """Try to UPX-unpack ``self.bytez`` via the ``upx -d`` CLI.

        On success ``self.bytez`` is replaced with the unpacked bytes;
        otherwise it is left untouched. The (possibly unchanged) bytes are
        returned. ``seed`` is unused here.
        """
        # dump bytez to a temporary file
        # NOTE(review): private tempfile helpers reserve a name without
        # creating the file, so this is not race-free.
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))

        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)

        # Silence UPX output by redirecting both streams to /dev/null.
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx', tmpfilename, '-d', '-o', tmpfilename + '_unpacked'], stdout=DEVNULL, stderr=DEVNULL)

        os.unlink(tmpfilename)

        if retcode == 0:  # successfully unpacked
            with open(tmpfilename + '_unpacked', 'rb') as result:
                self.bytez = result.read()

            os.unlink(tmpfilename + '_unpacked')

        return self.bytez 
Example 12
Project: keras-vis   Author: raghakot   File: utils.py    MIT License 6 votes vote down vote up
def apply_modifications(model, custom_objects=None):
    """Applies modifications to the model layers to create a new Graph. For example, simply changing
    `model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated
    with modified inbound and outbound tensors because of change in layer building function.

    Args:
        model: The `keras.models.Model` instance.
        custom_objects: Optional dict forwarded to ``load_model`` for
            resolving custom layers/activations during reload.

    Returns:
        The modified model with changes applied. Does not mutate the original `model`.
    """
    # The strategy is to save the modified model and load it back. This is done because setting the activation
    # in a Keras layer doesnt actually change the graph. We have to iterate the entire graph and change the
    # layer inbound and outbound nodes with modified tensors. This is doubly complicated in Keras 2.x since
    # multiple inbound and outbound nodes are allowed with the Graph API.
    # NOTE(review): _get_candidate_names is a private tempfile API; the name
    # is only reserved, not created, so a collision is theoretically possible.
    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        model.save(model_path)
        return load_model(model_path, custom_objects=custom_objects)
    finally:
        # Remove the temp file whether or not the reload succeeded.
        os.remove(model_path) 
Example 13
Project: edgePy   Author: r-bioinformatics   File: test_DGEList.py    MIT License 6 votes vote down vote up
def test_cycle_dge_npz():
    """Round-trip a DGEList through an .npz file and verify all fields survive."""

    import tempfile
    import os

    # mkdtemp actually creates the directory; the file name inside it is
    # drawn from tempfile's private candidate-name generator.
    tempdir = tempfile.mkdtemp(prefix="edgePy_tmp")
    file_name = tempdir + os.sep + next(tempfile._get_candidate_names())
    dge_list_first = dge_list()
    dge_list_first.write_npz_file(filename=file_name)

    # Reload from disk and compare every stored array field-by-field.
    dge_list_second = DGEList(filename=file_name + ".npz")
    assert np.array_equal(dge_list_first.counts, dge_list_second.counts)
    assert np.array_equal(dge_list_first.genes, dge_list_second.genes)
    assert np.array_equal(dge_list_first.samples, dge_list_second.samples)
    assert np.array_equal(dge_list_first.norm_factors, dge_list_second.norm_factors)
    assert np.array_equal(dge_list_first.groups_list, dge_list_second.groups_list)
    # Clean up both the file and the directory created for the test.
    os.remove(file_name + ".npz")
    os.rmdir(tempdir) 
Example 14
Project: deid   Author: pydicom   File: fileio.py    MIT License 6 votes vote down vote up
def get_temporary_name(prefix=None, ext=None):
    '''get a temporary name, can be used for a directory or file. The file
       is NOT created; only a unique path inside the temp directory is built.

       Parameters
       ==========
       prefix: if defined, inserted after the "deid-" marker
       ext: if defined, appended as the file extension. Do not specify "."
    '''
    marker = 'deid-' if not prefix else 'deid-%s-' % prefix
    token = next(tempfile._get_candidate_names())
    tmpname = os.path.join(tempfile.gettempdir(), marker + token)
    return tmpname if not ext else '%s.%s' % (tmpname, ext)


################################################################################
## FILE OPERATIONS #############################################################
################################################################################ 
Example 15
Project: pyblish-win   Author: pyblish   File: test_tempfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_retval(self):
        """The helper should hand back the module-level name iterator."""
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence) 
Example 16
Project: pyblish-win   Author: pyblish   File: test_tempfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def test_same_thing(self):
        """Repeated calls must return the identical iterator object."""
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b) 
Example 17
Project: pyblish-win   Author: pyblish   File: test_tempfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def _mock_candidate_names(*names):
    """Return a context manager that patches tempfile._get_candidate_names
    so it yields exactly the given *names* instead of random ones."""
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names)) 
Example 18
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: bigip_config.py    MIT License 5 votes vote down vote up
def merge(self, verify=True):
        """Upload the config under a random name and merge it on the device.

        verify: when True, the device is asked to verify the merge.
        Returns True immediately in check mode, otherwise the device response.
        """
        # Random temp name keeps concurrent module runs from colliding.
        temp_name = next(tempfile._get_candidate_names())
        remote_path = "/var/config/rest/downloads/{0}".format(temp_name)
        temp_path = '/tmp/' + temp_name

        if self.module.check_mode:
            return True

        self.upload_to_device(temp_name)
        # Move from the REST download area to /tmp before merging.
        self.move_on_device(remote_path)
        response = self.merge_on_device(
            remote_path=temp_path, verify=verify
        )
        self.remove_temporary_file(remote_path=temp_path)
        return response 
Example 19
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: bigip_ucs_fetch.py    MIT License 5 votes vote down vote up
def src(self):
        """Return the configured 'src' value, generating and caching a
        random '<name>.ucs' filename on first access."""
        cached = self._values['src']
        if cached is not None:
            return cached
        generated = next(tempfile._get_candidate_names()) + '.ucs'
        self._values['src'] = generated
        return generated
Example 20
Project: NiujiaoDebugger   Author: MrSrc   File: test_tempfile.py    GNU General Public License v3.0 5 votes vote down vote up
def test_retval(self):
        """The helper must return tempfile's shared name-sequence object."""
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence) 
Example 21
Project: NiujiaoDebugger   Author: MrSrc   File: test_tempfile.py    GNU General Public License v3.0 5 votes vote down vote up
def test_same_thing(self):
        """Two calls should yield the very same iterator (identity check)."""
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b) 
Example 22
Project: NiujiaoDebugger   Author: MrSrc   File: test_tempfile.py    GNU General Public License v3.0 5 votes vote down vote up
def _mock_candidate_names(*names):
    """Context manager: temporarily replace tempfile._get_candidate_names
    with one that yields exactly *names*."""
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names)) 
Example 23
Project: ansible-role-f5ansible   Author: f5devcentral   File: bigip_apm_policy_fetch.py    GNU General Public License v3.0 5 votes vote down vote up
def file(self):
        """Return the configured 'file' value; lazily generate and cache a
        random '<name>.tar.gz' filename when none was supplied."""
        if self._values['file'] is None:
            token = next(tempfile._get_candidate_names())
            self._values['file'] = token + '.tar.gz'
        return self._values['file']
Example 24
Project: ansible-role-f5ansible   Author: f5devcentral   File: bigip_asm_policy_fetch.py    GNU General Public License v3.0 5 votes vote down vote up
def file(self):
        """Return the configured 'file' value, or lazily generate one.

        Binary policies get a '.plc' extension, XML exports get '.xml';
        the generated name is cached on self._values.
        """
        existing = self._values['file']
        if existing is not None:
            return existing
        extension = '.plc' if self.binary else '.xml'
        result = next(tempfile._get_candidate_names()) + extension
        self._values['file'] = result
        return result
Example 25
Project: ansible-role-f5ansible   Author: f5devcentral   File: bigip_config.py    GNU General Public License v3.0 5 votes vote down vote up
def merge(self, verify=True):
        """Upload a config file to the BIG-IP and merge it into the running
        configuration; no-op (returns True) in check mode.

        verify: when True, request verification of the merge on the device.
        """
        # A random temp name avoids collisions between concurrent runs.
        temp_name = next(tempfile._get_candidate_names())
        remote_path = "/var/config/rest/downloads/{0}".format(temp_name)
        temp_path = '/tmp/' + temp_name

        if self.module.check_mode:
            return True

        self.upload_to_device(temp_name)
        # The device moves the upload into /tmp before the merge step.
        self.move_on_device(remote_path)
        response = self.merge_on_device(
            remote_path=temp_path, verify=verify
        )
        self.remove_temporary_file(remote_path=temp_path)
        return response 
Example 26
Project: ansible-role-f5ansible   Author: f5devcentral   File: bigip_ucs_fetch.py    GNU General Public License v3.0 5 votes vote down vote up
def src(self):
        """Return the 'src' value, generating and caching a random
        '<name>.ucs' filename the first time it is requested."""
        if self._values['src'] is not None:
            return self._values['src']
        result = next(tempfile._get_candidate_names()) + '.ucs'
        self._values['src'] = result
        return result 
Example 27
Project: gym-malware   Author: endgameinc   File: manipulate2.py    MIT License 5 votes vote down vote up
def upx_pack(self, seed=None):
        """Pack ``self.bytez`` with UPX using randomized packer options.

        tested with UPX 3.91. The random seed controls which compression
        level and flag combination is chosen, so a given seed reproduces
        the same packing options.

        Returns:
            ``self.bytez`` -- the packed bytes on success, otherwise the
            original bytes unchanged.
        """
        # tested with UPX 3.91
        random.seed(seed)
        # NOTE(review): private tempfile helpers only reserve a name; the
        # file is created by the open() below, so this is not race-free.
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))

        # dump bytez to a temporary file
        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)

        options = ['--force', '--overlay=copy']
        compression_level = random.randint(1, 9)
        options += ['-{}'.format(compression_level)]
        # --exact
        # compression levels -1 to -9
        # --overlay=copy [default]

        # optional things:
        # --compress-exports=0/1
        # --compress-icons=0/1/2/3
        # --compress-resources=0/1
        # --strip-relocs=0/1
        options += ['--compress-exports={}'.format(random.randint(0, 1))]
        options += ['--compress-icons={}'.format(random.randint(0, 3))]
        options += ['--compress-resources={}'.format(random.randint(0, 1))]
        options += ['--strip-relocs={}'.format(random.randint(0, 1))]

        # Run UPX quietly, discarding its stdout/stderr.
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx'] + options + [tmpfilename, '-o', tmpfilename + '_packed'], stdout=DEVNULL, stderr=DEVNULL)

        os.unlink(tmpfilename)

        if retcode == 0:  # successfully packed

            with open(tmpfilename + '_packed', 'rb') as infile:
                self.bytez = infile.read()

            os.unlink(tmpfilename + '_packed')

        return self.bytez 
Example 28
Project: helpme   Author: vsoch   File: fileio.py    Mozilla Public License 2.0 5 votes vote down vote up
def generate_temporary_file(folder="/tmp", prefix="helpme", ext="json"):
    """Build a unique temporary file path (the file itself is NOT created).

       Parameters
       ==========
       folder: the base directory the path points into.
       prefix: the filename prefix to use.
       ext: the file extension to use.

    """
    token = next(tempfile._get_candidate_names())
    return "%s/%s.%s.%s" % (folder, prefix, token, ext)
Example 29
Project: helpme   Author: vsoch   File: record.py    Mozilla Public License 2.0 5 votes vote down vote up
def generate_temporary_file(self, folder="/tmp", prefix="helpme", ext="json"):

        """Return a unique '<folder>/<prefix>.<random>.<ext>' path.

           The file is not created; only the name is generated.

           Parameters
           ==========
           folder: the base directory to write in.
           prefix: the prefix to use.
           ext: the extension to use.

        """
        token = next(tempfile._get_candidate_names())
        return "%s/%s.%s.%s" % (folder, prefix, token, ext)
Example 30
Project: RoboGif   Author: izacus   File: utilities.py    Apache License 2.0 5 votes vote down vote up
def get_new_temp_file_path(extension):
    """Return a fresh '<tmpdir>/<random>.<extension>' path (file not created)."""
    directory = tempfile._get_default_tempdir()
    name = "%s.%s" % (next(tempfile._get_candidate_names()), extension)
    return os.path.join(directory, name)
Example 31
Project: ironpython2   Author: IronLanguages   File: test_tempfile.py    Apache License 2.0 5 votes vote down vote up
def test_retval(self):
        """Check the return type of the private name-candidate helper."""
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence) 
Example 32
Project: ironpython2   Author: IronLanguages   File: test_tempfile.py    Apache License 2.0 5 votes vote down vote up
def test_same_thing(self):
        """The iterator is a module-level singleton: identity must hold."""
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b) 
Example 33
Project: ironpython2   Author: IronLanguages   File: test_tempfile.py    Apache License 2.0 5 votes vote down vote up
def _mock_candidate_names(*names):
    """Patch tempfile._get_candidate_names (via swap_attr) so it produces
    exactly the supplied *names*; returns the patching context manager."""
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names)) 
Example 34
Project: sregistry-cli   Author: singularityhub   File: api.py    Mozilla Public License 2.0 5 votes vote down vote up
def get_layer(self, image_id, repo_name, download_folder=None):
    """download an image layer (.tar.gz) to a specified download folder.

       Parameters
       ==========
       image_id: the identifier of the layer to download.
       repo_name: the image name (library/ubuntu) to retrieve
       download_folder: download to this folder. If not set, uses temp.

       Returns the final path of the downloaded layer archive.
    """
    url = self._get_layerLink(repo_name, image_id)

    bot.verbose("Downloading layers from %s" % url)

    download_folder = get_tmpdir(download_folder)
    download_folder = "%s/%s.tar.gz" % (download_folder, image_id)

    # Update user what we are doing
    bot.debug("Downloading layer %s" % image_id)

    # Step 1: Download the layer atomically
    # (download to a random temp name, then move into place below).
    file_name = "%s.%s" % (download_folder, next(tempfile._get_candidate_names()))

    tar_download = self.download(url, file_name)

    # NOTE(review): bare except hides the real failure (permissions,
    # disk full, ...) behind a generic download message.
    try:
        shutil.move(tar_download, download_folder)
    except:
        msg = "Cannot untar layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.exit(msg)
    return download_folder 
Example 35
Project: sregistry-cli   Author: singularityhub   File: aws.py    Mozilla Public License 2.0 5 votes vote down vote up
def download_task(url, headers, download_to, download_type="layer"):
    """download an image layer (.tar.gz) to a specified download folder.
       This task is done by using local versions of the same download functions
       that are used for the client.
       core stream/download functions of the parent client.

       Parameters
       ==========
       url: the url to download the layer from.
       headers: request headers forwarded to the download function.
       download_to: the final path to move the downloaded file to.
       download_type: label used only for the log message (default "layer").

       Returns the final path ``download_to`` on success.
    """
    # Update the user what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: Download the layer atomically
    # (random temp suffix, then an atomic-ish move into place below).
    file_name = "%s.%s" % (download_to, next(tempfile._get_candidate_names()))

    tar_download = download(url, file_name, headers=headers)

    # NOTE(review): bare except masks the underlying error type.
    try:
        shutil.move(tar_download, download_to)
    except:
        msg = "Cannot untar layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.exit(msg)

    return download_to


################################################################################
## Base Functions for Tasks
##
##  These basic tasks are intended for the worker to use, without needing
##  to pickle them for multiprocessing. It works because they don't belong
##  to a client (which we cannot pickle) and are imported by the worker
##  functions directly.
##
################################################################################ 
Example 36
Project: sregistry-cli   Author: singularityhub   File: tasks.py    Mozilla Public License 2.0 5 votes vote down vote up
def download_task(url, headers, destination, download_type="layer"):
    """download an image layer (.tar.gz) to a specified download folder.
       This task is done by using local versions of the same download functions
       that are used for the client.
       core stream/download functions of the parent client.

       Parameters
       ==========
       url: the url to download the layer from.
       headers: request headers forwarded to the download function.
       destination: the final path to move the downloaded file to.
       download_type: label used only for the log message (default "layer").

       Returns the final path ``destination`` on success.
    """
    # Update the user what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: Download the layer atomically
    # (download under a random temp suffix, then move into place).
    file_name = "%s.%s" % (destination, next(tempfile._get_candidate_names()))

    tar_download = download(url, file_name, headers=headers)

    # NOTE(review): bare except masks the underlying error type.
    try:
        shutil.move(tar_download, destination)
    except:
        msg = "Cannot untar layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.exit(msg)

    return destination


################################################################################
## Base Functions for Tasks
##
##  These basic tasks are intended for the worker to use, without needing
##  to pickle them for multiprocessing. It works because they don't belong
##  to a client (which we cannot pickle) and are imported by the worker
##  functions directly.
##
################################################################################ 
Example 37
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_tempfile.py    GNU General Public License v2.0 5 votes vote down vote up
def test_retval(self):
        """Verify the helper returns a _RandomNameSequence instance."""
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence) 
Example 38
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_tempfile.py    GNU General Public License v2.0 5 votes vote down vote up
def test_same_thing(self):
        """Verify successive calls return one shared iterator object."""
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b) 
Example 39
Project: cqp-sdk-for-py37-native   Author: crud-boy   File: test_tempfile.py    GNU General Public License v2.0 5 votes vote down vote up
def _mock_candidate_names(*names):
    """Swap in a fake _get_candidate_names that yields the given *names*;
    the returned context manager restores the original on exit."""
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names)) 
Example 40
Project: oss-ftp   Author: aliyun   File: test_tempfile.py    MIT License 5 votes vote down vote up
def test_retval(self):
        """The private helper should return the name-sequence iterator."""
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assertIsInstance(obj, tempfile._RandomNameSequence) 
Example 41
Project: oss-ftp   Author: aliyun   File: test_tempfile.py    MIT License 5 votes vote down vote up
def test_same_thing(self):
        """Identity check: the name iterator is shared across calls."""
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assertTrue(a is b) 
Example 42
Project: oss-ftp   Author: aliyun   File: test_tempfile.py    MIT License 5 votes vote down vote up
def _mock_candidate_names(*names):
    """Temporarily replace tempfile._get_candidate_names with a stub that
    yields exactly *names*; returns the swap_attr context manager."""
    return support.swap_attr(tempfile,
                             '_get_candidate_names',
                             lambda: iter(names)) 
Example 43
Project: ALCD   Author: CNES   File: L1C_band_composition.py    GNU General Public License v3.0 5 votes vote down vote up
def create_contours_density(in_tif, in_channel, out_tif, radius=3, resolution=60):
    '''
    Create a contours density feature from a band.

    in_tif: input raster path.
    in_channel: 1-based band index to analyze.
    out_tif: output raster path written by the OTB pipeline.
    radius: window radius for the local statistics.
    resolution: target pixel size used when resizing the band.
    '''
    # Resize the band to the working resolution into a uniquely named temp TIF.
    tmp_name = next(tempfile._get_candidate_names())
    temp_tif = op.join('tmp', 'band_for_contours_density_{}.tif'.format(tmp_name))
    resize_band(in_tif, out_band=temp_tif, pixelresX=resolution, pixelresY=resolution)

    # Compute the contours of the image
    EdgeExtraction = otbApplication.Registry.CreateApplication("EdgeExtraction")
    EdgeExtraction.SetParameterString("in", str(temp_tif))
    EdgeExtraction.SetParameterInt("channel", int(in_channel))
    EdgeExtraction.SetParameterString("filter", "gradient")
    EdgeExtraction.UpdateParameters()
    # Execute() keeps the result in memory so the next app can chain on it.
    EdgeExtraction.Execute()

    # Mean and others moments of the contours
    LocalStatisticExtraction = otbApplication.Registry.CreateApplication("LocalStatisticExtraction")
    LocalStatisticExtraction.SetParameterInputImage(
        "in", EdgeExtraction.GetParameterOutputImage("out"))
    LocalStatisticExtraction.SetParameterInt("channel", 1)
    LocalStatisticExtraction.SetParameterInt("radius", radius)
    LocalStatisticExtraction.UpdateParameters()
    LocalStatisticExtraction.Execute()

    # Only take the mean (1st channel)
    MeanOnly = otbApplication.Registry.CreateApplication("BandMathX")
    MeanOnly.SetParameterString("out", str(out_tif))
    MeanOnly.AddImageToParameterInputImageList(
        "il", LocalStatisticExtraction.GetParameterOutputImage("out"))
    MeanOnly.SetParameterString("exp", "im1b1")
    MeanOnly.UpdateParameters()
    MeanOnly.ExecuteAndWriteOutput() 
Example 44
Project: differentiable-point-clouds   Author: eldar   File: render_point_cloud.py    MIT License 5 votes vote down vote up
def render_point_cloud(point_cloud, cfg):
    """
    Wraps the call to blender to render the image.

    point_cloud: array-like, reshaped to (1, N, 3) before being handed to the
        blender script (assumes XYZ triplets — TODO confirm).
    cfg: dict-like config providing vis_azimuth, vis_elevation, vis_dist,
        render_cycles_samples and render_image_size.

    Returns the rendered image as read back by imageio.
    """
    cfg = edict(cfg)
    # Private tempfile helpers: only names are produced, no files are created.
    temp_dir = tempfile._get_default_tempdir()

    # Input file for the blender script: the point cloud as a .npz archive.
    temp_name = next(tempfile._get_candidate_names())
    in_file = f"{temp_dir}/{temp_name}.npz"
    point_cloud_save = np.reshape(point_cloud, (1, -1, 3))
    np.savez(in_file, point_cloud_save)

    # Output file the blender script will write the render to.
    temp_name = next(tempfile._get_candidate_names())
    out_file = f"{temp_dir}/{temp_name}.png"

    # Serialize the render settings into CLI arguments for the blender script.
    args = build_command_line_args([["in_file", in_file],
                                    ["out_file", out_file],
                                    ["vis_azimuth", cfg.vis_azimuth],
                                    ["vis_elevation", cfg.vis_elevation],
                                    ["vis_dist", cfg.vis_dist],
                                    ["cycles_samples", cfg.render_cycles_samples],
                                    ["like_train_data", True],
                                    ["voxels", False],
                                    ["colored_subsets", False],
                                    ["image_size", cfg.render_image_size]],
                                   as_string=False)

    # blender_exec / python_script are module-level globals defined elsewhere.
    full_args = [blender_exec, "--background", "-P", python_script, "--"] + args
    subprocess.check_call(full_args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)

    # Read the render back, then clean up both temp files.
    image = imageio.imread(out_file)
    os.remove(in_file)
    os.remove(out_file)

    return image
Example 45
Project: Civil   Author: ebrahimraeyat   File: punchPanel.py    GNU General Public License v3.0 5 votes vote down vote up
def update(self):
        """Update the Civil workbench to the latest version.

        Asks for confirmation and checks connectivity, then tries a
        ``git pull`` in the detected installation directory.  If the pull
        raises (e.g. the directory is not a git checkout), falls back to
        cloning the repository into a temp directory and copying it over.
        """
        if (QMessageBox.question(None, "update", "update to latest version?!",
                                 QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes) == QMessageBox.No):
            return
        if not internet():
            msg = "You are not connected to the Internet, please check your internet connection."
            QMessageBox.warning(None, 'update', str(msg))
            return

        # Two levels above this file: the workbench installation root.
        civil_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
        user_data_dir = App.getUserAppDataDir()
        # If installed outside the user data dir, update the copy under Mod/ instead.
        if user_data_dir not in civil_path:
            mod_path = os.path.join(App.getUserAppDataDir(), 'Mod')
            if not os.path.exists(mod_path):
                os.mkdir(mod_path)
            civil_path = os.path.join(mod_path, 'Civil')
        import git
        g = git.cmd.Git(civil_path)
        msg = ''
        try:
            msg = g.pull(env={'GIT_SSL_NO_VERIFY': '1'})
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.  Pull failed: clone into a temp dir and copy.
            QMessageBox.information(None, "update", "update takes some minutes, please be patient.")
            import shutil
            import tempfile
            default_tmp_dir = tempfile._get_default_tempdir()
            name = next(tempfile._get_candidate_names())
            punch_temp_dir = os.path.join(default_tmp_dir, 'Civil' + name)
            os.mkdir(punch_temp_dir)
            os.chdir(punch_temp_dir)
            git.Git('.').clone("https://github.com/ebrahimraeyat/Civil.git", env={'GIT_SSL_NO_VERIFY': '1'})
            src_folder = os.path.join(punch_temp_dir, 'Civil')

            shutil.copytree(src_folder, civil_path)
            msg = 'update done successfully, please remove Civil folder from FreeCAD installation folder!,  then restart FreeCAD.'

        else:
            # Pull succeeded but returned no message: report it as a failure.
            if not msg:
                msg = 'error occurred during update\nplease contact with @roknabadi'
        QMessageBox.information(None, 'update', msg)
Example 46
Project: mapr-ansible   Author: mapr-emea   File: mapr_entity.py    Apache License 2.0 5 votes vote down vote up
def suggest_temp_volume_name():
    """Return a temporary MapR volume name that is not already taken.

    Draws candidate names from tempfile's private name generator, prefixes
    them with 'taec.' and retries until the name does not collide with any
    existing volume name.
    """
    # Build the taken-name set once: O(1) membership test per candidate
    # instead of the original linear scan (and the `== False` comparison).
    taken = {str(volume_name) for volume_name in load_volume_names()}
    while True:
        tmp_volume_name = 'taec.' + next(tempfile._get_candidate_names())
        if tmp_volume_name not in taken:
            return tmp_volume_name
Example 47
Project: DeepFormants   Author: MLSpeech   File: utilities.py    MIT License 5 votes vote down vote up
def generate_tmp_filename(extension):
    """Return a fresh temp-file path ending in '.' + extension.

    Note: only a name is generated (via tempfile's private helpers); no file
    is created, so a collision is possible before the caller opens the path.
    """
    # os.path.join is portable, unlike hard-coding '/' as the separator.
    return os.path.join(tempfile._get_default_tempdir(),
                        next(tempfile._get_candidate_names()) + "." + extension)
Example 48
Project: eggsnspam   Author: wayfair   File: mixins.py    BSD 2-Clause "Simplified" License 5 votes vote down vote up
def setUp(self):
        """Point the app's healthcheck status file at a fresh random name."""
        # Draw one random candidate file name for this test run.
        status_file = next(tempfile._get_candidate_names())
        self.tmp_status_file = status_file
        self.app.config['HEALTHCHECK_STATUS_FILE'] = status_file
        super(HealthViewTestCaseMixin, self).setUp()
Example 49
Project: network-portrait-divergence   Author: bagrow   File: portrait_divergence.py    MIT License 5 votes vote down vote up
def portrait_cpp(graph, fname=None, keepfile=False):
    """Compute and generate portrait of graph using compiled B_matrix
    executable.

    Return matrix B where B[i,j] is the number of starting nodes in graph with
    j nodes in shell i

    graph: a networkx graph.
    fname: basename for the intermediate .edgelist/.Bmat files; a random
        temp-style name is used when None (files land in the CWD).
    keepfile: keep the intermediate files instead of deleting them.
    """
    # file to save to:
    f = fname
    if fname is None:
        f = next(tempfile._get_candidate_names())
    
    # make sure nodes are 0,...,N-1 integers:
    graph = nx.convert_node_labels_to_integers(graph)
    
    # write edgelist:
    nx.write_edgelist(graph, f+".edgelist", data=False)
    
    # make B-matrix with the compiled helper (expected in the CWD):
    # NOTE(review): f is interpolated into a shell command line; a
    # caller-supplied fname containing shell metacharacters would be executed.
    # Confirm fname is always trusted, or switch to subprocess with an argv list.
    os.system("./B_matrix {}.edgelist {}.Bmat > /dev/null".format(f, f))
    portrait = np.loadtxt("{}.Bmat".format(f))
    
    # clean up:
    if not keepfile:
        os.remove(f+".edgelist")
        os.remove(f+".Bmat")
    
    return portrait
Example 50
Project: lemur   Author: Netflix   File: utils.py    Apache License 2.0 5 votes vote down vote up
def mktemppath():
    """Yield a fresh temp-file path, unlinking it when the block exits.

    Presumably wrapped by @contextmanager (decorator not visible in this
    chunk — TODO confirm).  Only a name is produced; the caller is
    responsible for actually creating the file at the yielded path.
    """
    try:
        path = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names())
        )
        yield path
    finally:
        # Best-effort cleanup: the caller may never have created the file.
        try:
            os.unlink(path)
        except OSError as e:
            current_app.logger.debug("No file {0}".format(path))
Example 51
Project: Ansible_Meetups   Author: jmcalalang   File: bigip_config.py    Apache License 2.0 5 votes vote down vote up
def merge(self, verify=True):
        """Upload the config to the device, merge it and return the response."""
        # One random name shared by the upload and both on-device locations.
        name = next(tempfile._get_candidate_names())
        upload_path = "/var/config/rest/downloads/{0}".format(name)
        staging_path = '/tmp/' + name

        # In check mode nothing is sent to the device.
        if self.client.check_mode:
            return True

        self.upload_to_device(name)
        self.move_on_device(upload_path)
        result = self.merge_on_device(remote_path=staging_path, verify=verify)
        self.remove_temporary_file(remote_path=staging_path)
        return result
Example 52
Project: snips-nlu   Author: snipsco   File: test_cli.py    Apache License 2.0 5 votes vote down vote up
def setUp(self):
        """Create the fixture dir, a beverage dataset JSON and a free tmp path."""
        super(TestCLI, self).setUp()
        if not self.fixture_dir.exists():
            self.fixture_dir.mkdir()

        # Inline YAML describing two intents; converted to the JSON dataset
        # format below.  (Runtime data — must not be altered.)
        dataset_stream = io.StringIO(u"""
---
type: intent
name: MakeTea
utterances:
  - make me a [beverage_temperature:Temperature](hot) cup of tea
  - make me [number_of_cups:snips/number](five) tea cups
  - i want [number_of_cups] cups of [beverage_temperature](boiling hot) tea pls
  - can you prepare [number_of_cups] cup of [beverage_temperature](cold) tea ?

---
type: intent
name: MakeCoffee
utterances:
  - make me [number_of_cups:snips/number](one) cup of coffee please
  - brew [number_of_cups] cups of coffee
  - can you prepare [number_of_cups] cup of coffee""")
        beverage_dataset = Dataset.from_yaml_files("en", [dataset_stream]).json

        # (Re)write the dataset fixture from scratch for every test.
        self.beverage_dataset_path = self.fixture_dir / "beverage_dataset.json"
        if self.beverage_dataset_path.exists():
            self.beverage_dataset_path.unlink()
        with self.beverage_dataset_path.open(mode="w", encoding="utf8") as f:
            f.write(json_string(beverage_dataset))

        # Draw random names until one does not collide with an existing file.
        self.tmp_file_path = self.fixture_dir / next(
            tempfile._get_candidate_names())
        while self.tmp_file_path.exists():
            self.tmp_file_path = self.fixture_dir / next(
                tempfile._get_candidate_names())
Example 53
Project: snips-nlu   Author: snipsco   File: utils.py    Apache License 2.0 5 votes vote down vote up
def setUp(self):
        """Create a fresh fixture directory and a non-existing temp file path."""
        super(FixtureTest, self).setUp()
        self.fixture_dir = Path(tempfile.mkdtemp())
        # mkdtemp already created the directory; this is a defensive re-check.
        if not self.fixture_dir.exists():
            self.fixture_dir.mkdir()

        # Keep drawing random names until one does not collide.
        names = tempfile._get_candidate_names()
        candidate = self.fixture_dir / next(names)
        while candidate.exists():
            candidate = self.fixture_dir / next(names)
        self.tmp_file_path = candidate
Example 54
Project: pliers   Author: tyarkoni   File: test_graph.py    BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def test_big_pipeline():
    """End-to-end pliers graph: visual and audio branches over a video stim."""
    pytest.importorskip('pygraphviz')
    filename = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    video = VideoStim(filename)
    # Visual branch: sample every 15th frame, then OCR->length plus vibrance
    # and brightness extractors.
    visual_nodes = [(FrameSamplingFilter(every=15), [
        (TesseractConverter(), [LengthExtractor()]),
        VibranceExtractor(), 'BrightnessExtractor',
    ])]
    # Audio branch: extract audio, transcribe it, measure transcript length.
    audio_nodes = [(VideoToAudioConverter(), [
        (WitTranscriptionConverter(), ['LengthExtractor'])],
        'video_to_audio')]
    graph = Graph()
    graph.add_nodes(visual_nodes)
    graph.add_nodes(audio_nodes)
    # Drawing before running must fail: there are no results to draw yet.
    with pytest.raises(RuntimeError):
        graph.draw('temp.png')
    results = graph.run(video, merge=False)
    result = merge_results(results, format='wide', extractor_names='multi')
    # Test that pygraphviz outputs a file (random name in the CWD, cleaned up)
    drawfile = next(tempfile._get_candidate_names())
    graph.draw(drawfile)
    graph.draw(drawfile, color=False)
    assert exists(drawfile)
    os.remove(drawfile)
    assert ('LengthExtractor', 'text_length') in result.columns
    assert ('VibranceExtractor', 'vibrance') in result.columns
    # assert not result[('onset', '')].isnull().any()
    assert 'text[negotiations]' in result['stim_name'].values
    assert 'frame[90]' in result['stim_name'].values
Example 55
Project: ChainConsumer   Author: Samreay   File: test_analysis.py    MIT License 5 votes vote down vote up
def test_file_loading1(self):
        """Chains can be loaded from a whitespace-delimited .txt samples file."""
        data = self.data[:1000]
        directory = tempfile._get_default_tempdir()
        filename = next(tempfile._get_candidate_names())
        filename = directory + os.sep + filename + ".txt"
        np.savetxt(filename, data)
        try:
            consumer = ChainConsumer()
            consumer.add_chain(filename)
            summary = consumer.analysis.get_summary()
            actual = np.array(list(summary.values())[0])
            # Mean of the first parameter should be close to the true value 5.
            assert np.abs(actual[1] - 5.0) < 0.5
        finally:
            # Remove the temp file even on failure — the original leaked it.
            os.remove(filename)
Example 56
Project: ChainConsumer   Author: Samreay   File: test_analysis.py    MIT License 5 votes vote down vote up
def test_file_loading2(self):
        """Chains can be loaded from a binary .npy samples file."""
        data = self.data[:1000]
        directory = tempfile._get_default_tempdir()
        filename = next(tempfile._get_candidate_names())
        filename = directory + os.sep + filename + ".npy"
        np.save(filename, data)
        try:
            consumer = ChainConsumer()
            consumer.add_chain(filename)
            summary = consumer.analysis.get_summary()
            actual = np.array(list(summary.values())[0])
            # Mean of the first parameter should be close to the true value 5.
            assert np.abs(actual[1] - 5.0) < 0.5
        finally:
            # Remove the temp file even on failure — the original leaked it.
            os.remove(filename)
Example 57
Project: S-PCGC   Author: omerwe   File: run_pcgc_simulations.py    GNU General Public License v3.0 5 votes vote down vote up
def create_chr_extract_file(plink_fname, chr_num):
    extract_fname = os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
    df_bim = pd.read_table(plink_fname+'.bim', delim_whitespace=True, usecols=[0,1])
    df_bim.columns = ['CHR', 'SNP']
    df_bim = df_bim.query('CHR==%d'%(chr_num))
    assert df_bim.shape[0] > 0
    df_bim[['SNP']].to_csv(extract_fname, header=False, index=False)
    return extract_fname 
Example 58
Project: S-PCGC   Author: omerwe   File: run_pcgc_simulations.py    GNU General Public License v3.0 5 votes vote down vote up
def create_sumstats(studies_obj, multi_chrom):
    """Create temporary summary-statistics files for every simulated study.

    studies_obj: container exposing .studies_arr (per-study objects with
        .plink_fname, .prev, .y) and .ref_fname (reference panel prefix).
    multi_chrom: when True, one sumstats file per chromosome (1..22) is
        written with a '.<chr>' suffix; otherwise one file per study.

    Returns the list of temp-file prefixes, one per study.
    NOTE(review): num_studies and PLINK_EXE are module-level globals defined
    elsewhere in this file.
    """
    sumstats_prefixes = []    
    ref_fname = studies_obj.ref_fname    
    for study_i in range(num_studies):
    
        #extract the data of study i
        study_obj = studies_obj.studies_arr[study_i]
        plink_fname = study_obj.plink_fname
        prev = study_obj.prev

        #create a file name for the temporary summary statistics
        ss_fname = os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
        sumstats_prefixes.append(ss_fname)        
        
        #if not a case-control study (more than 2 distinct phenotype values,
        #i.e. a quantitative trait), run Plink linear regression
        if len(np.unique(study_obj.y)) > 2:
            n = len(study_obj.y)
            if multi_chrom:
                for chr_num in range(1,23):
                    run_plink_linreg(PLINK_EXE, plink_fname, ss_fname+'.%d'%(chr_num), n, chr_num=chr_num)
            else:
                run_plink_linreg(PLINK_EXE, plink_fname, ss_fname, n, chr_num=None)
                
        #if it's a case-control study, use the PCGC sumstats creator instead
        else:
            if multi_chrom:
                for chr_num in range(1,23):
                    run_pcgc_sumstats_creator(prev, ref_fname, plink_fname, ss_fname+'.%d'%(chr_num), study_obj, multi_chrom, chr_num=chr_num)
            else:
                run_pcgc_sumstats_creator(prev, ref_fname, plink_fname, ss_fname, study_obj, multi_chrom, chr_num=None)
        
    return sumstats_prefixes
Example 59
Project: S-PCGC   Author: omerwe   File: run_pcgc_simulations.py    GNU General Public License v3.0 5 votes vote down vote up
def run_pcgc_sumstats(ref_fname, sumstats_prefixes, multi_chrom, use_he):
    """Run S-PCGC over previously created summary statistics.

    ref_fname: prefix of the reference files (annot/prodr2/frqfile/sync).
    sumstats_prefixes: per-study sumstats prefixes from create_sumstats().
    multi_chrom: route every input through the *_chr argument variants so
        S-PCGC appends the chromosome number itself.
    use_he: run Haseman-Elston regression instead of PCGC.

    Returns (pcgc_obj, out_fname_prefix), where out_fname_prefix is the temp
    prefix the results were written under.
    """

    out_fname_prefix = os.path.join(tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
        
    #create an args object mimicking the S-PCGC command-line namespace
    spcgc_args = Dummy()    
    if multi_chrom:
        # Prefixes end with '.'; S-PCGC appends the chromosome number.
        spcgc_args.sumstats_chr = ','.join([c+'.' for c in sumstats_prefixes])
        spcgc_args.sumstats = None
        spcgc_args.annot_chr = ref_fname+'.'
        spcgc_args.annot = None
        spcgc_args.prodr2_chr = ref_fname+'.'
        spcgc_args.prodr2 = None
        spcgc_args.frqfile_chr = ref_fname+'.'
        spcgc_args.frqfile = None
    else:
        spcgc_args.sumstats = ','.join([c+'.' for c in sumstats_prefixes])
        spcgc_args.sumstats_chr = None
        spcgc_args.annot = ref_fname+'.'
        spcgc_args.annot_chr = None
        spcgc_args.prodr2 = ref_fname+'.'
        spcgc_args.prodr2_chr = None
        spcgc_args.frqfile = ref_fname+'.'
        spcgc_args.frqfile_chr = None
    spcgc_args.out = out_fname_prefix
    spcgc_args.no_Gty = False
    spcgc_args.not_M_5_50 = False
    spcgc_args.n_blocks = 200
    spcgc_args.chisq_max = None
    spcgc_args.he = use_he
    spcgc_args.fit_intercept = False
    spcgc_args.no_annot = False
    spcgc_args.rg_annot = True
    spcgc_args.keep_anno = None
    spcgc_args.print_delete_vals = True
    spcgc_args.remove_anno = None
    spcgc_args.sync = ref_fname+'.'
    
    #run S-PCGC (reload so repeated simulation runs start from fresh module state)
    imp.reload(pcgc_main); pcgc_obj = pcgc_main.SPCGC(args=spcgc_args)
    return pcgc_obj, out_fname_prefix
Example 60
Project: ansible_f5   Author: mcgonagle   File: bigip_config.py    Apache License 2.0 5 votes vote down vote up
def merge(self, verify=True):
        """Merge a staged configuration into the running config on the device."""
        candidate = next(tempfile._get_candidate_names())
        download_path = "/var/config/rest/downloads/{0}".format(candidate)
        tmp_path = '/tmp/' + candidate

        # Check mode: report success without touching the device.
        if self.client.check_mode:
            return True

        self.upload_to_device(candidate)
        self.move_on_device(download_path)
        merged = self.merge_on_device(remote_path=tmp_path, verify=verify)
        self.remove_temporary_file(remote_path=tmp_path)
        return merged
Example 61
Project: ansible_f5   Author: mcgonagle   File: bigip_config.py    Apache License 2.0 5 votes vote down vote up
def merge(self, verify=True):
        """Upload, stage and merge the configuration; clean up the staged file."""
        fname = next(tempfile._get_candidate_names())
        uploaded = "/var/config/rest/downloads/{0}".format(fname)
        staged = '/tmp/' + fname

        if self.client.check_mode:
            # Nothing is changed on the device in check mode.
            return True

        self.upload_to_device(fname)
        self.move_on_device(uploaded)
        outcome = self.merge_on_device(
            remote_path=staged, verify=verify
        )
        self.remove_temporary_file(remote_path=staged)
        return outcome
Example 62
Project: ansible_f5   Author: mcgonagle   File: bigip_ucs_fetch.py    Apache License 2.0 5 votes vote down vote up
def src(self):
        """Return the configured 'src' value, or a random candidate name."""
        configured = self._values['src']
        if configured is not None:
            return configured
        # No explicit destination given: fall back to a random temp-style name.
        return next(tempfile._get_candidate_names())
Example 63
Project: mutateEXE   Author: jymcheong   File: manipulate2.py    MIT License 5 votes vote down vote up
def upx_pack(self, seed=None):
        """Pack self.bytez with UPX using randomized options.

        seed: seeds the random option choices, for reproducibility.
        Returns the packed bytes; if upx fails, self.bytez is left unchanged
        and returned as-is.  Requires the 'upx' binary on PATH.
        """
        # tested with UPX 3.91
        random.seed(seed)
        # Private tempfile helpers: only a name, no file is created yet.
        tmpfilename = os.path.join(
            tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))

        # dump bytez to a temporary file
        with open(tmpfilename, 'wb') as outfile:
            outfile.write(self.bytez)

        options = ['--force', '--overlay=copy']
        # Randomized compression level between -1 and -9.
        compression_level = random.randint(1, 9)
        options += ['-{}'.format(compression_level)]
        # --exact
        # compression levels -1 to -9
        # --overlay=copy [default]

        # optional things:
        # --compress-exports=0/1
        # --compress-icons=0/1/2/3
        # --compress-resources=0/1
        # --strip-relocs=0/1
        options += ['--compress-exports={}'.format(random.randint(0, 1))]
        options += ['--compress-icons={}'.format(random.randint(0, 3))]
        options += ['--compress-resources={}'.format(random.randint(0, 1))]
        options += ['--strip-relocs={}'.format(random.randint(0, 1))]

        # Silence upx entirely; only the return code matters.
        with open(os.devnull, 'w') as DEVNULL:
            retcode = subprocess.call(
                ['upx'] + options + [tmpfilename, '-o', tmpfilename + '_packed'], stdout=DEVNULL, stderr=DEVNULL)

        os.unlink(tmpfilename)

        if retcode == 0:  # successfully packed

            with open(tmpfilename + '_packed', 'rb') as infile:
                self.bytez = infile.read()

            os.unlink(tmpfilename + '_packed')

        return self.bytez
Example 64
Project: Hot   Author: dsolimando   File: test_tempfile.py    GNU General Public License v3.0 5 votes vote down vote up
def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        obj = tempfile._get_candidate_names()
        self.assert_(isinstance(obj, tempfile._RandomNameSequence)) 
Example 65
Project: Hot   Author: dsolimando   File: test_tempfile.py    GNU General Public License v3.0 5 votes vote down vote up
def test_same_thing(self):
        # _get_candidate_names always returns the same object
        a = tempfile._get_candidate_names()
        b = tempfile._get_candidate_names()

        self.assert_(a is b) 
Example 66
Project: AnkiTools   Author: patarapolw   File: test_ankidirect.py    MIT License 5 votes vote down vote up
def __init__(self, collection):
        """Work on a throw-away copy of an Anki collection.

        collection: path to a collection file; when None, a default path is
            obtained from get_collection_path().
        """
        if collection is None:
            collection = get_collection_path()

        # NOTE(review): the copy gets a bare random name with no directory,
        # so it lands in the current working directory and is never cleaned
        # up here — confirm this is intended.
        self.collection_path = next(tempfile._get_candidate_names())
        shutil.copy(src=collection, dst=self.collection_path)
        self.ankidirect = AnkiDirect(anki_database=self.collection_path)
Example 67
Project: ViFi   Author: namphuon   File: run_vifi.py    GNU General Public License v3.0 5 votes vote down vote up
def create_new_hmm_list(hmm_list_dir, hmm_list):
  """Rewrite an HMM list file with paths remapped for use inside a container.

  Reads hmm_list (one path per line) from hmm_list_dir, keeps only the lines
  whose path exists on disk, and writes "/home/hmm_list/<basename>" for each
  into a new randomly named file in the same directory.

  Returns the (closed) output file object; callers use its .name attribute.
  """
  out_path = "%s/%s" % (hmm_list_dir, next(tempfile._get_candidate_names()))
  # 'with' guarantees both handles are closed; the original leaked the input
  # handle and shadowed the builtin 'input'.
  with open("%s/%s" % (hmm_list_dir, hmm_list), 'r') as infile, \
       open(out_path, 'w') as output:
    for line in infile:
      line = line.strip()
      if os.path.exists(line):
        name = os.path.basename(line)
        output.write("%s/%s\n" % ("/home/hmm_list/", name))
  # os.chmod avoids spawning a shell just to change permissions.
  os.chmod(output.name, 0o777)
  return output
Example 68
Project: debug-visualizer   Author: Kobzol   File: util.py    GNU General Public License v3.0 5 votes vote down vote up
def create_pipe():
    """
    Creates a named FIFO pipe in the temp dir.
    @rtype: str
    """
    # Random name in the system temp dir, with a .fifo suffix.
    pipe_path = os.path.join(tempfile.gettempdir(),
                             next(tempfile._get_candidate_names()) + ".fifo")
    # mkfifo fails loudly if the path already exists, so collisions surface.
    os.mkfifo(pipe_path)
    return os.path.abspath(pipe_path)
Example 69
Project: debug-visualizer   Author: Kobzol   File: lldb_io_manager.py    GNU General Public License v3.0 5 votes vote down vote up
def create_pipe():
        """Create a named FIFO in the temp directory; return its absolute path."""
        candidate = next(tempfile._get_candidate_names())
        fifo_path = os.path.join(tempfile.gettempdir(), candidate + ".fifo")
        # mkfifo raises if the path already exists, so collisions surface.
        os.mkfifo(fifo_path)
        return os.path.abspath(fifo_path)
Example 70
Project: hyperas   Author: maxpumperla   File: mnist_distributed.py    MIT License 5 votes vote down vote up
def create_model(x_train, y_train, x_test, y_test):
    """
    Create your model...

    Hyperas template: the {{...}} expressions are placeholders substituted by
    hyperas before execution, so this body is not valid plain Python as-is.
    Returns a hyperopt result dict, including a serialized copy of the model.
    """
    layer_1_size = {{quniform(12, 256, 4)}}
    l1_dropout = {{uniform(0.001, 0.7)}}
    params = {
        'l1_size': layer_1_size,
        'l1_dropout': l1_dropout
    }
    num_classes = 10
    model = Sequential()
    model.add(Dense(int(layer_1_size), activation='relu'))
    model.add(Dropout(l1_dropout))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(),
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, verbose=0)
    # hyperopt minimizes 'loss', so negated accuracy is the objective.
    out = {
        'loss': -acc,
        'score': score,
        'status': STATUS_OK,
        'model_params': params,
    }
    # optionally store a dump of your model here so you can get it from the database later
    # NOTE(review): the temp .h5 is never deleted after reading — confirm
    # whether cleanup is wanted.
    temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
    model.save(temp_name)
    with open(temp_name, 'rb') as infile:
        model_bytes = infile.read()
    out['model_serial'] = model_bytes
    return out
Example 71
Project: wfuzz   Author: gwen001   File: screenshot.py    GNU General Public License v2.0 5 votes vote down vote up
def process(self, fuzzresult):
        """Take a screenshot of the fuzz result URL with cutycapt.

        The image is written to a randomly named .png in the system temp dir
        and the path is reported through add_result.
        """
        temp_name = next(tempfile._get_candidate_names())
        default_tmp_dir = tempfile._get_default_tempdir()

        filename = os.path.join(default_tmp_dir, temp_name + ".png")

        # subprocess.call with an argv list does NOT go through a shell, so
        # the original pipes.quote() was wrong: it would wrap URLs containing
        # '?' or '&' in literal quotes and break the cutycapt argument.
        subprocess.call(['cutycapt', '--url=%s' % fuzzresult.url, '--out=%s' % filename])
        # Fixed user-facing typo: "Screnshot" -> "Screenshot"; also normalized
        # the mixed tab/space indentation that made this block a TabError.
        self.add_result("Screenshot taken, output at %s" % filename)
Example 72
Project: helpme   Author: vsoch   File: tasks.py    Mozilla Public License 2.0 4 votes vote down vote up
def download_task(url, headers, destination, download_type="layer"):
    """download an image layer (.tar.gz) to a specified destination path.
       This task is done by using local versions of the same core
       stream/download functions that are used for the client.

       Parameters
       ==========
       url: the url to download the layer from
       headers: HTTP headers forwarded to the download call
       destination: final path for the downloaded file; the download itself
           goes to a randomly named sibling path first, then is moved into
           place (atomic on the same filesystem)
       download_type: label used only for the verbose log message

       (The previous docstring documented parameters — image_id, repo_name,
       download_folder — that do not exist in this signature.)
    """
    # Update the user what we are doing
    bot.verbose("Downloading %s from %s" % (download_type, url))

    # Step 1: Download the layer atomically — random temp name next to the
    # destination, then move into place on success.
    file_name = "%s.%s" % (destination, next(tempfile._get_candidate_names()))

    tar_download = download(url, file_name, headers=headers)

    try:
        shutil.move(tar_download, destination)
    except Exception:
        msg = "Cannot untar layer %s," % tar_download
        msg += " was there a problem with download?"
        bot.error(msg)
        # NOTE(review): exits the whole process on failure — confirm this is
        # acceptable behavior for a worker task.
        sys.exit(1)

    return destination


################################################################################
## Base Functions for Tasks
##
##  These basic tasks are intended for the worker to use, without needing
##  to pickle them for multiprocessing. It works because they don't belong
##  to a client (which we cannot pickle) and are imported by the worker
##  functions directly.
##
################################################################################ 
Example 73
Project: lucid4keras   Author: totti0223   File: utils.py    Apache License 2.0 4 votes vote down vote up
def prepare_model(model,layer_name="conv2d_5",linearize=True):
    '''
    Build an intermediate model truncated at `layer_name`, optionally with
    linearized activations.

    input:
        model : a model built with keras.
        layer_name : a valid layer name within the model.
        linearize : will modify the specified layer from relu to linear
            (actually the final layer of the generated intermediate model).
    return:
        modified keras model.

    Changing `layer.activation` alone does not rebuild the Keras graph, so
    the model is saved to a temp .hdf5 and reloaded to apply the change.
    '''
    def linearize_activations(input_model,layer_name):
        # Recursively walk inbound layers of merge/pooling layers and set
        # every activation found to linear.
        def search_inbound(layer_name):
            layer_list = []
            if "merge" in str(input_model.get_layer(layer_name)):
                #print("\tlayer",input_model.get_layer(layer_name).name,"is a merge layer. searching for connected layers: ")
                for layer in input_model.get_layer(layer_name)._inbound_nodes[0].inbound_layers:
                    search_inbound(layer.name)
            elif "pool" in str(input_model.get_layer(layer_name)):
                # Pooling layers have no activation to linearize.
                pass
            else:
                #print("\ttargeting layer:",input_model.get_layer(layer_name).name,input_model.get_layer(layer_name).activation)
                if input_model.get_layer(layer_name).activation == activations.linear:
                    print("already a linear layer")
                else:
                    print("\tlinearizing layer:",layer_name)
                    input_model.get_layer(layer_name).activation = activations.linear
            return 0

        if "merge" in str(input_model.get_layer(layer_name)) or "GlobalAveragePooling2D" in str(input_model.get_layer(layer_name)):
            print(layer_name,input_model.get_layer(layer_name),"is a merge layer. will linearize connected relu containing layers")
            #print("inbound layers are")
            #for layer in input_model.get_layer(layer_name)._inbound_nodes[0].inbound_layers:
            #    print("\t",layer.name)

            #print("will (recursively) search for layers connected to the specified layers until it hits a activation layer or conv2d layer having activations")
            _ = search_inbound(layer_name)
            # Save/reload round-trip applies the activation change to the graph.
            model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".hdf5")
            input_model.save(model_path)
            input_model = load_model(model_path,compile=False)
            os.remove(model_path)
            return input_model
        else:
            #print("linearizing the specified layer:",layer_name,str(input_model.get_layer(layer_name)),input_model.get_layer(layer_name).activation)
            if input_model.get_layer(layer_name).activation == activations.linear:
                print("already a linear layer, return unmodified model")
                return input_model
            else:
                print("linearizing layer:",layer_name)
                input_model.get_layer(layer_name).activation = activations.linear
                # Save/reload round-trip applies the activation change.
                model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + ".hdf5")
                input_model.save(model_path)
                input_model = load_model(model_path,compile=False)
                os.remove(model_path)
                return input_model
    

    # Truncate the model at the requested layer.
    model_intout = model.get_layer(layer_name).output
    int_model = Model(inputs=model.input,outputs=model_intout)

    if linearize:
        int_model = linearize_activations(int_model,layer_name)
    return int_model
Example 74
Project: ALCD   Author: CNES   File: L1C_band_composition.py    GNU General Public License v3.0 4 votes vote down vote up
def create_composit_band(bands_full_paths, out_tif, resolution=60, composit_type='ND'):
    ''' Create a composition of multiple bands. Their order is important !!!

    composit_type selects the expression applied to the two resampled bands:
      'ND' -> normalized difference (im1-im2)/(0.01+im1+im2)
      'D'  -> plain difference im1-im2
      'R'  -> ratio (im1+0.01)/(im2+0.01)
    The 0.01 terms guard against division by zero / NaN in the output.
    Exactly two bands are required; an unknown composit_type does nothing.
    '''
    # Random suffix for the intermediate resampled bands (written under a
    # relative 'tmp' dir, as in the rest of this module).
    tmp_name = next(tempfile._get_candidate_names())
    temp0 = op.join('tmp', 'band1_{}.tif'.format(tmp_name))
    temp1 = op.join('tmp', 'band2_{}.tif'.format(tmp_name))

    resize_band(bands_full_paths[0], out_band=temp0, pixelresX=resolution, pixelresY=resolution)
    resize_band(bands_full_paths[1], out_band=temp1, pixelresX=resolution, pixelresY=resolution)

    temp_bands_full_paths = [str(temp0), str(temp1)]

    # One expression per composition type; replaces three duplicated branches
    # that differed only in the BandMathX expression.
    expressions = {
        'ND': "(im1b1-im2b1)/(0.01+im1b1+im2b1)",
        'D': "(im1b1-im2b1)",
        'R': "(im1b1+0.01)/(im2b1+0.01)",
    }

    if composit_type in expressions:
        if len(bands_full_paths) != 2:
            # Same user-visible message as before, for each composition type.
            print('Impossible to continue: 2 bands needs to be given for the {}'.format(composit_type))
        else:
            BandMathX = otbApplication.Registry.CreateApplication("BandMathX")
            BandMathX.SetParameterStringList("il", temp_bands_full_paths)
            BandMathX.SetParameterString("out", str(out_tif))
            BandMathX.SetParameterString("exp", expressions[composit_type])
            BandMathX.UpdateParameters()
            BandMathX.ExecuteAndWriteOutput()
Example 75
Project: ruv-dl   Author: sindrig   File: __init__.py    MIT License 4 votes vote down vote up
def move_old_locations(destination, dryrun=False):
    """Migrate downloaded programs from v0 episode numbering to v1 naming.

    For every program not yet at version >= 1, computes the old (sequential)
    file name for each episode and moves it to the new target name.  When a
    target already exists, the source is parked at a random temp name in the
    same directory and the move is retried later, so chains of renames cannot
    clobber each other.

    destination: root directory of the downloaded library.
    dryrun: log the moves without performing them (and skip the version bump).
    """
    fetcher = ProgramFetcher(None, None, destination)
    for program_info in fetcher.get_all_program_infos():
        # Already migrated — nothing to do.
        if program_info.version >= 1:
            logger.info('Skipping %s', program_info)
            continue
        program = program_info.program
        logger.info('Targeting %s', program['title'])
        seasons = program_info.seasons
        files_to_move = []
        for season, entries in seasons.items():
            season_folder = Entry.get_season_folder(
                destination, program, season
            )
            for i, target_entry in enumerate(entries.sorted()):
                if target_entry.episode.number is None:
                    raise RuntimeError(
                        'You need to attempt sync for this program once '
                        'before running this migration.'
                    )
                # Old scheme: episode number was simply the 1-based position
                # in the sorted listing.
                src_entry = copy.deepcopy(target_entry)
                src_entry.episode = Episode(None)
                src_entry.episode.number = i + 1

                src_dest = os.path.join(
                    season_folder,
                    src_entry.get_target_basename(program, season),
                )

                target_dest = os.path.join(
                    season_folder,
                    target_entry.get_target_basename(program, season),
                )

                if src_dest != target_dest and os.path.isfile(src_dest):
                    files_to_move.append((src_dest, target_dest))
        while files_to_move:
            # NOTE(review): this assignment is dead — `i` is never read below.
            i = 0
            src_dest, target_dest = files_to_move.pop(0)
            if dryrun:
                logger.info('Would move %s to %s', src_dest, target_dest)
            elif os.path.isfile(target_dest):
                # Target occupied (by a file still waiting to be renamed):
                # park the source at a random temp name and retry later.
                temp_path = next(tempfile._get_candidate_names())
                temp_path = os.path.join(
                    os.path.dirname(target_dest),
                    temp_path,
                )
                logger.info(
                    '%s exists. Moving temporarily to %s and postponing',
                    target_dest, temp_path,
                )
                shutil.move(src_dest, temp_path)
                files_to_move.append((temp_path, target_dest))
            else:
                os.makedirs(os.path.dirname(target_dest), exist_ok=True)
                logger.info('Moving %s to %s', src_dest, target_dest)
                shutil.move(src_dest, target_dest)

        if not dryrun:
            # Mark the program as migrated so re-runs skip it.
            program_info.version = 1
            program_info.write()
Example 76
Project: KaFKA   Author: jgomezdans   File: kafka_test_S2.py    GNU General Public License v3.0 4 votes vote down vote up
def wrapper(the_chunk):
    """Run the KaFKA linear Kalman filter over one spatial chunk of the
    Sentinel-2 scene.

    Args:
        the_chunk: tuple ``(this_X, this_Y, nx_valid, ny_valid, chunk)`` with
            the chunk's upper-left pixel offsets, its valid width/height in
            pixels, and an integer chunk id used to prefix the output files.
    """
    parameter_list = ['n', 'cab', 'car', 'cbrown', 'cw', 'cm',
                      'lai', 'ala', 'bsoil', 'psoil']
    this_X, this_Y, nx_valid, ny_valid, chunk = the_chunk

    emulator_folder = "/home/ucfafyi/DATA/Multiply/emus/sail/"
    data_folder = "/data/nemesis/S2_data/30/S/WJ/"

    import tempfile

    # NOTE(review): tempfile._get_candidate_names() is a private CPython API
    # that only picks a name (nothing is created), and the .vrt written below
    # is never removed; tempfile.NamedTemporaryFile(delete=False) would be the
    # supported alternative -- confirm downstream readers before changing.
    temp_name = next(tempfile._get_candidate_names())
    state_mask = gdal.Translate(f"{temp_name:s}.vrt", "./Barrax_pivots.tif",
                                srcWin=[this_X, this_Y, nx_valid, ny_valid], 
                                format="VRT")
    state_mask = f"{temp_name:s}.vrt"

    s2_observations = Sentinel2Observations(data_folder,
                                            emulator_folder, 
                                            state_mask)

    projection, geotransform = s2_observations.define_output()

    output = KafkaOutput(parameter_list, geotransform,
                         projection, "/tmp/", prefix=hex(chunk))

    the_prior = SAILPrior(parameter_list, state_mask)

    g = gdal.Open(state_mask)
    # np.bool was deprecated in NumPy 1.20 and removed in NumPy 2.0; the
    # builtin bool is the documented replacement and behaves identically here.
    mask = g.ReadAsArray().astype(bool)

    kf = LinearKalman(s2_observations, output, mask,
                      create_prosail_observation_operator,
                      parameter_list,
                      state_propagation=None,
                      prior=the_prior,
                      linear=False)

    # Get starting state... We can request the prior object for this
    x_forecast, P_forecast_inv = the_prior.process_prior(None)

    # No additional process noise on the trajectory model.
    Q = np.zeros_like(x_forecast)

    kf.set_trajectory_model()
    kf.set_trajectory_uncertainty(Q)

    # Every-other-day time grid over 10 days starting 3 July 2017.
    base = datetime(2017, 7, 3)
    num_days = 10
    time_grid = [base + timedelta(days=x) for x in range(0, num_days, 2)]
    kf.run(time_grid, x_forecast, None, P_forecast_inv,
           iter_obs_op=True)
Example 77
Project: resilient-python-api   Author: ibmresilient   File: resilient_common.py   License: MIT License   4 votes
def write_to_tmp_file(data, tmp_file_name=None, path_tmp_dir=None):
    """Writes data to a file in a safely created temp directory. If no
    `tmp_file_name` is provided, a unique name is generated securely. If no
    `path_tmp_dir` is provided a temp directory is created with the prefix
    `resilient-lib-tmp-`.

    When used within a Resilient Function, ensure you safely remove the created temp
    directory in the `finally` block of the FunctionComponent code.

    Example:
        import os
        import shutil
        try:
            path_tmp_file, path_tmp_dir = write_to_tmp_file(attachment_contents, tmp_file_name=attachment_metadata.get("name"))

        except Exception:
            yield FunctionError()

        finally:
            if path_tmp_dir and os.path.isdir(path_tmp_dir):
                shutil.rmtree(path_tmp_dir)

    :param data: bytes to be written to the file
    :type data: `bytes`
    :param tmp_file_name: name to be given to the file.
    :type tmp_file_name: `str`
    :param path_tmp_dir: path to an existing directory to use as the temp dir
    :type path_tmp_dir: `str`
    :raises IOError: if `path_tmp_dir` is given but does not exist
    :return: a tuple (path_tmp_file, path_tmp_dir)
    :rtype: tuple
    """

    # Resolve the directory first so an auto-generated file can be created
    # directly (and atomically) inside it.
    if not path_tmp_dir:
        path_tmp_dir = tempfile.mkdtemp(prefix="resilient-lib-tmp-")

    elif not os.path.isdir(path_tmp_dir):
        raise IOError("Path does not exist: {0}".format(path_tmp_dir))

    if tmp_file_name:
        # Caller chose the name: write it at the requested path.
        path_tmp_file = os.path.join(path_tmp_dir, tmp_file_name)
        with io.open(path_tmp_file, mode="wb") as temp_file:
            temp_file.write(data)
    else:
        # tempfile.mkstemp creates the file atomically with a unique name,
        # avoiding the private tempfile._get_candidate_names() API and the
        # race between choosing a name and creating the file.
        fd, path_tmp_file = tempfile.mkstemp(dir=path_tmp_dir)
        # Passing the fd to io.open transfers ownership; the fd is closed
        # when the with-block exits.
        with io.open(fd, mode="wb") as temp_file:
            temp_file.write(data)

    return (path_tmp_file, path_tmp_dir)
Example 78
Project: pliers   Author: tyarkoni   File: test_graph.py   License: BSD 3-Clause "New" or "Revised" License   4 votes
def test_big_pipeline_json():
    """Build a two-root pliers Graph from a JSON-style node spec, run it on a
    video stimulus, and check both the drawn graph file and the merged
    results."""
    pytest.importorskip('pygraphviz')
    video_path = join(get_test_data_path(), 'video', 'obama_speech.mp4')
    stim = VideoStim(video_path)

    # First root: sample frames, then fan out to OCR-length, vibrance and
    # brightness extractors.
    frame_root = {
        "transformer": "FrameSamplingFilter",
        "parameters": {"every": 15},
        "children": [
            {
                "transformer": "TesseractConverter",
                "children": [{"transformer": "LengthExtractor"}],
            },
            {"transformer": "VibranceExtractor"},
            {"transformer": "BrightnessExtractor"},
        ],
    }
    # Second root: extract audio, transcribe it, measure text length.
    audio_root = {
        "transformer": "VideoToAudioConverter",
        "children": [
            {
                "transformer": "WitTranscriptionConverter",
                "children": [{"transformer": "LengthExtractor"}],
            }
        ],
    }

    graph = Graph({"roots": [frame_root, audio_root]})
    run_results = graph.run(stim, merge=False)
    merged = merge_results(run_results, format='wide', extractor_names='multi')

    # Test that pygraphviz outputs a file
    plot_file = next(tempfile._get_candidate_names())
    graph.draw(plot_file)
    assert exists(plot_file)
    os.remove(plot_file)

    assert ('LengthExtractor', 'text_length') in merged.columns
    assert ('VibranceExtractor', 'vibrance') in merged.columns
    # assert not merged[('onset', '')].isnull().any()
    assert 'text[negotiations]' in merged['stim_name'].values
    assert 'frame[90]' in merged['stim_name'].values
Example 79
Project: S-PCGC   Author: omerwe   File: pcgc_simulator.py   License: GNU General Public License v3.0   4 votes
def write_ref_files(self, multi_chrom):
    """Write PCGC reference files (MAF, sync, prod-r2, M_annot and annotation
    files) for the simulated data under a unique temp-file prefix.

    Args:
        multi_chrom: If True, the prodr2 / M_5_50 / annot files are written
            per-chromosome; otherwise a single genome-wide file of each kind
            is written. MAF files are always written both ways.

    Side effects:
        Creates the files in the system temp directory and stores the common
        prefix in ``self.ref_fname``.
    """
    # NOTE(review): tempfile._get_candidate_names() is a private API that only
    # picks a name -- nothing is created on disk, so concurrent runs could in
    # principle collide on the same prefix.
    ref_fname = os.path.join(tempfile._get_default_tempdir(),
                             next(tempfile._get_candidate_names()))

    # Create MAF files (genome-wide, plus one per chromosome).
    df_mafs = self.df_map[['CHR', 'SNP']].copy()
    df_mafs['MAF'] = self.mafs
    df_mafs['A0'] = 2
    df_mafs['A1'] = 1
    df_mafs.to_csv(ref_fname+'.frq', sep='\t', index=False, header=True)
    # np.unique: self.chr_arr holds one entry per SNP, so iterating it
    # directly would rewrite each per-chromosome file once per SNP.
    for chr_num in np.unique(self.chr_arr):
        df_mafs_chr = df_mafs.query('CHR == %d'%(chr_num))
        df_mafs_chr.to_csv(ref_fname+'.%d.frq'%(chr_num), sep='\t', index=False, header=True)

    # Create a sync file with the (clipped to <=0) per-annotation minimum.
    annotation_names = ['anno_%d'%(anno_i+1) for anno_i in range(self.annotations.shape[1])]
    min_annot = np.min(self.annotations, axis=0)
    min_annot[min_annot>0] = 0
    df_sync = pd.Series(min_annot, index=annotation_names)
    df_sync.index.name = 'Category'
    df_sync.to_csv(ref_fname+'.sync', sep='\t', float_format='%0.5e')

    # Create prod_r^2 files and M_annot files.
    df_prod_r2 = pd.DataFrame(((self.annotations-min_annot)**2).T.dot((self.annotations-min_annot)**2), index=annotation_names, columns=annotation_names)
    # np.vstack replaces np.row_stack, which was a deprecated alias removed
    # in NumPy 2.0 (identical behavior).
    df_M = pd.DataFrame(np.vstack(self.annotations.sum(axis=0)).T, columns=annotation_names)
    if not multi_chrom:
        df_prod_r2.to_csv(ref_fname+'.prodr2', sep='\t', index=True, header=True)
        df_M.to_csv(ref_fname+'.l2.M_5_50', header=False, index=False, float_format='%0.3f', sep='\t')
    else:
        for chr_num in np.unique(self.chr_arr):
            anno_chr = self.annotations[self.chr_arr==chr_num]
            df_prod_r2_chr = pd.DataFrame((anno_chr**2).T.dot(anno_chr**2), index=annotation_names, columns=annotation_names)
            df_prod_r2_chr.to_csv(ref_fname+'.%d.prodr2'%(chr_num), sep='\t', index=True, header=True)
            df_M_chr = pd.DataFrame(np.vstack(anno_chr.sum(axis=0)).T, columns=annotation_names)
            df_M_chr.to_csv(ref_fname+'.%d.l2.M_5_50'%(chr_num), header=False, index=False, float_format='%0.3f', sep='\t')

    # Create annotation files (map columns plus one column per annotation).
    df_anno = self.df_map[['CHR', 'BP', 'SNP', 'CM']].copy()
    for anno_i, anno_name in enumerate(annotation_names):
        df_anno[anno_name] = self.annotations[:, anno_i]
    if not multi_chrom:
        df_anno.to_csv(ref_fname+'.annot.gz', sep='\t', index=False, header=True, compression='gzip')
    else:
        for chr_num in np.unique(self.chr_arr):
            df_anno_chr = df_anno.query('CHR == %d'%(chr_num))
            df_anno_chr.to_csv(ref_fname+'.%d.annot.gz'%(chr_num), sep='\t', index=False, header=True, compression='gzip')

    # Save the common files prefix for downstream consumers.
    self.ref_fname = ref_fname
Example 80
Project: keras-vis   Author: raghakot   File: tensorflow_backend.py   License: MIT License   4 votes
def modify_model_backprop(model, backprop_modifier):
    """Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior.

    Args:
        model:  The `keras.models.Model` instance.
        backprop_modifier: One of `{'guided', 'rectified'}`

    Returns:
        A copy of model with modified activations for backwards pass.

    Raises:
        ValueError: If `backprop_modifier` is not a supported modifier name.
    """
    # The general strategy is as follows:
    # - Save original model so that upstream callers don't see unexpected results with their models.
    # - Call backend specific function that registers the custom op and loads the model under modified context manager.
    # - Maintain cache to save this expensive process on subsequent calls.
    # - Load model with custom context modifying backprop behavior.
    #
    # The reason for this round about way is because the graph needs to be rebuilt when any of its layer builder
    # functions are changed. This is very complicated to do in Keras and makes the implementation very tightly bound
    # with keras internals. By saving and loading models, we dont have to worry about future compatibility.
    #
    # The only exception to this is the way advanced activations are handled which makes use of some keras internal
    # knowledge and might break in the future.
    # ADD on 22 Jul 2018:
    #     In fact, it has broken. Currently, advanced activations are not supported.
    # NOTE(review): the cache keys on the model object itself, so cached
    # modified models are kept alive for the process lifetime.

    # 0. Retrieve from cache if previously computed.
    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))
    if modified_model is not None:
        return modified_model

    # 1. Validate the modifier up front, before the expensive model save.
    modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)
    if modifier_fn is None:
        raise ValueError("'{}' modifier is not supported".format(backprop_modifier))

    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        # 2. Save original model
        model.save(model_path)

        # 3. Register modifier and load modified model under custom context.
        modifier_fn(backprop_modifier)

        # 4. Create graph under custom context manager.
        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):
            #  This should rebuild graph with modifications.
            modified_model = load_model(model_path)

            # Cache to improve subsequent call performance.
            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model
            return modified_model
    finally:
        # model.save() may have failed before the file was created; guard the
        # cleanup so a FileNotFoundError doesn't mask the original exception.
        if os.path.isfile(model_path):
            os.remove(model_path)