Python shutil.copyfileobj() Examples

The following are code examples for showing how to use shutil.copyfileobj(). They are from open source Python projects. You can vote up the examples you like or vote down the ones you don't like.

Example 1
Project: REMAKE   Author: PHIEZUKE   File: talk.py    GNU General Public License v3.0 6 votes vote down vote up
def sendImageWithURL2(self, to, url):
        """Download an image from *url* and send it.

        :param to: recipient id
        :param url: image url to send
        :raises Exception: if the download does not return HTTP 200
        """
        # NOTE(review): relative path — assumes the process CWD contains a
        # writable 'tmp/' directory; confirm against deployment layout.
        path = 'tmp/pythonLine.data'

        r = requests.get(url, stream=True)
        if r.status_code != 200:
            raise Exception('Download image failure.')
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)

        # The original wrapped this call in `try/except Exception as e:
        # raise e`, which is a no-op; let exceptions propagate directly.
        self.sendImage(to, path)
Example 2
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: layman.py    MIT License 6 votes vote down vote up
def download_url(module, url, dest):
    '''
    Download ``url`` and write the response body to ``dest``.

    :param url: the URL to download
    :param dest: the absolute path of where to save the downloaded content to;
        it must be writable and not a directory

    :raises ModuleError
    '''

    # Hack to add params in the form that fetch_url expects
    module.params['http_agent'] = USERAGENT
    response, info = fetch_url(module, url)
    if info['status'] != 200:
        raise ModuleError("Failed to get %s: %s" % (url, info['msg']))

    try:
        # Bug fix: the response is a byte stream, so the destination must be
        # opened in binary mode — text mode ('w') makes copyfileobj raise
        # TypeError on Python 3 and corrupts data on Windows.
        with open(dest, 'wb') as f:
            shutil.copyfileobj(response, f)
    except IOError as e:
        raise ModuleError("Failed to write: %s" % str(e))
Example 3
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: text.py    Apache License 2.0 6 votes vote down vote up
def _get_data(self):
        """Ensure the dataset file is present and load it into NDArrays.

        Downloads and unpacks the archive into ``self._root`` when the data
        file is missing or fails its SHA-1 check, then reads the batch and
        stores data/labels reshaped to ``(-1, self._seq_len)``.
        """
        archive_file_name, archive_hash = self._archive_file
        data_file_name, data_hash = self._data_file[self._segment]
        path = os.path.join(self._root, data_file_name)
        # Re-download when the file is absent or its checksum does not match.
        if not os.path.exists(path) or not check_sha1(path, data_hash):
            namespace = 'gluon/dataset/'+self._namespace
            downloaded_file_path = download(_get_repo_file_url(namespace, archive_file_name),
                                            path=self._root,
                                            sha1_hash=archive_hash)

            with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
                # Flatten the archive: each member is written into self._root
                # under its base name; directory entries (empty basename) skip.
                for member in zf.namelist():
                    filename = os.path.basename(member)
                    if filename:
                        dest = os.path.join(self._root, filename)
                        with zf.open(member) as source, \
                             open(dest, "wb") as target:
                            shutil.copyfileobj(source, target)

        data, label = self._read_batch(path)

        self._data = nd.array(data, dtype=data.dtype).reshape((-1, self._seq_len))
        self._label = nd.array(label, dtype=label.dtype).reshape((-1, self._seq_len))
Example 4
Project: CyberTK-Self   Author: CyberTKR   File: LineApi.py    GNU General Public License v2.0 6 votes vote down vote up
def sendImageWithUrl(self, to_, url):
        """Send a image with given image url

        :param url: image url to send
        """
        # Bug fixes: '%1' was a garbled conversion (sendVideoWithURL in this
        # file uses '%i'), and the file must be opened in binary mode
        # because r.raw yields bytes.
        path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))

        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(path, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            raise Exception('Download image failure.')

        try:
            self.sendImage(to_, path)
        except Exception as e:
            raise e
Example 5
Project: sic   Author: Yanixos   File: tarfile.py    GNU General Public License v3.0 6 votes vote down vote up
def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None):
    """Copy exactly *length* bytes from fileobj *src* to fileobj *dst*.

    When *length* is None the whole stream is copied; when the source ends
    before *length* bytes are read, *exception* is raised.
    """
    chunk = bufsize or 16 * 1024
    if length == 0:
        return
    if length is None:
        # Unbounded copy: hand the work to shutil.
        shutil.copyfileobj(src, dst, chunk)
        return

    # Bounded copy: read full chunks, then the tail, verifying each read.
    remaining = length
    while remaining > 0:
        step = chunk if remaining >= chunk else remaining
        data = src.read(step)
        if len(data) < step:
            raise exception("unexpected end of data")
        dst.write(data)
        remaining -= step
    return
Example 6
Project: sic   Author: Yanixos   File: tarfile.py    GNU General Public License v3.0 6 votes vote down vote up
def addfile(self, tarinfo, fileobj=None):
        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
           given, it should be a binary file, and tarinfo.size bytes are read
           from it and added to the archive. You can create TarInfo objects
           directly, or by using gettarinfo().
        """
        self._check("awx")

        # Copy so the mutations below never affect the caller's TarInfo.
        tarinfo = copy.copy(tarinfo)

        # Write the member header and advance the archive offset.
        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
        self.fileobj.write(buf)
        self.offset += len(buf)
        bufsize=self.copybufsize
        # If there's data to follow, append it.
        if fileobj is not None:
            copyfileobj(fileobj, self.fileobj, tarinfo.size, bufsize=bufsize)
            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
            if remainder > 0:
                # Pad the final partial record with NULs to a full block.
                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
                blocks += 1
            self.offset += blocks * BLOCKSIZE

        self.members.append(tarinfo)
Example 7
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 6 votes vote down vote up
def postPull(self, f):
        """Decompress the pulled ``.gz`` file *f*, optionally extract it.

        Uses the external gzip binary when available, otherwise Python's
        gzip module; removes *f* afterwards and returns the path of the
        uncompressed tar file.
        """
        assert f.endswith('.gz')
        logv('Uncompressing {}...'.format(f))
        tarfilename = f[:-len('.gz')]
        if AbstractTARDistribution._has_gzip():
            with open(tarfilename, 'wb') as tar:
                # force, quiet, decompress, cat to stdout
                run([AbstractTARDistribution._gzip_binary(), '-f', '-q', '-d', '-c', f], out=tar)
        else:
            with gzip.open(f, 'rb') as gz, open(tarfilename, 'wb') as tar:
                shutil.copyfileobj(gz, tar)
        os.remove(f)
        if self.output:
            output = self.get_output()
            with tarfile.open(tarfilename, 'r:') as tar:
                logv('Extracting {} to {}'.format(tarfilename, output))
                # NOTE(review): extractall does not guard against members
                # with absolute or '..' paths — confirm archives are trusted.
                tar.extractall(output)
        return tarfilename
Example 8
Project: dockerfiles   Author: floydhub   File: dataset.py    Apache License 2.0 6 votes vote down vote up
def download(directory, filename):
  """Download (and unzip) a file from the MNIST dataset if not already done."""
  filepath = os.path.join(directory, filename)
  if tf.gfile.Exists(filepath):
    return filepath
  if not tf.gfile.Exists(directory):
    tf.gfile.MakeDirs(directory)
  # CVDF mirror of http://yann.lecun.com/exdb/mnist/
  url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
  # Fetch the gzipped copy to a temp path, then decompress into place.
  _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
  print('Downloading %s to %s' % (url, zipped_filepath))
  urllib.request.urlretrieve(url, zipped_filepath)
  with gzip.open(zipped_filepath, 'rb') as compressed:
    with tf.gfile.Open(filepath, 'wb') as destination:
      shutil.copyfileobj(compressed, destination)
  os.remove(zipped_filepath)
  return filepath
Example 9
Project: dockerfiles   Author: floydhub   File: dataset.py    Apache License 2.0 6 votes vote down vote up
def download(directory, filename):
  """Download (and unzip) a file from the MNIST dataset if not already done."""
  filepath = os.path.join(directory, filename)
  # Already present: nothing to do.
  if tf.gfile.Exists(filepath):
    return filepath
  if not tf.gfile.Exists(directory):
    tf.gfile.MakeDirs(directory)
  # CVDF mirror of http://yann.lecun.com/exdb/mnist/
  url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
  # Fetch the gzipped file to a temp location, then decompress into place.
  _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
  print('Downloading %s to %s' % (url, zipped_filepath))
  urllib.request.urlretrieve(url, zipped_filepath)
  with gzip.open(zipped_filepath, 'rb') as f_in, \
      tf.gfile.Open(filepath, 'wb') as f_out:
    shutil.copyfileobj(f_in, f_out)
  os.remove(zipped_filepath)
  return filepath
Example 10
Project: openhatch   Author: campbe13   File: runner.py    GNU Affero General Public License v3.0 6 votes vote down vote up
def project_environment(project):
    """Set up (and tear down) the environment for running *project*.

    Fetches the project's egg from storage, spools it to a temp file and
    activates it before yielding; the temp egg is removed afterwards.
    NOTE(review): this is a generator, presumably wrapped by
    ``@contextmanager`` outside this view — confirm at the call site.
    """
    app = get_application()
    eggstorage = app.getComponent(IEggStorage)
    version, eggfile = eggstorage.get(project)
    if eggfile:
        prefix = '%s-%s-' % (project, version)
        fd, eggpath = tempfile.mkstemp(prefix=prefix, suffix='.egg')
        lf = os.fdopen(fd, 'wb')
        shutil.copyfileobj(eggfile, lf)
        lf.close()
        activate_egg(eggpath)
    else:
        eggpath = None
    try:
        assert 'scrapy.conf' not in sys.modules, "Scrapy settings already loaded"
        yield
    finally:
        # Always clean up the temporary egg, even if the body raised.
        if eggpath:
            os.remove(eggpath)
Example 11
Project: jawfish   Author: war-and-code   File: tarfile.py    MIT License 6 votes vote down vote up
def copyfileobj(src, dst, length=None):
    """Copy exactly *length* bytes from fileobj *src* to fileobj *dst*.

    With *length* None the entire content is copied; a premature end of
    the source raises IOError.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: delegate to shutil.
        shutil.copyfileobj(src, dst)
        return

    chunk = 16 * 1024
    left = length
    # Read full 16 KiB chunks and then the tail, checking every read.
    while left > 0:
        want = chunk if left >= chunk else left
        data = src.read(want)
        if len(data) < want:
            raise IOError("end of file reached")
        dst.write(data)
        left -= want
    return
Example 12
Project: jawfish   Author: war-and-code   File: tarfile.py    MIT License 6 votes vote down vote up
def addfile(self, tarinfo, fileobj=None):
        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
           given, tarinfo.size bytes are read from it and added to the archive.
           You can create TarInfo objects using gettarinfo().
           On Windows platforms, `fileobj' should always be opened with mode
           'rb' to avoid irritation about the file size.
        """
        self._check("aw")

        # Copy so the mutations below never affect the caller's TarInfo.
        tarinfo = copy.copy(tarinfo)

        # Write the member header and advance the archive offset.
        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
        self.fileobj.write(buf)
        self.offset += len(buf)

        # If there's data to follow, append it.
        if fileobj is not None:
            copyfileobj(fileobj, self.fileobj, tarinfo.size)
            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
            if remainder > 0:
                # Pad the final partial record with NULs to a full block.
                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
                blocks += 1
            self.offset += blocks * BLOCKSIZE

        self.members.append(tarinfo)
Example 13
Project: smoke-zephyr   Author: zeroSteiner   File: utilities.py    BSD 3-Clause "New" or "Revised" License 6 votes vote down vote up
def download(url, filename=None):
	"""
	Download a file from a url and save it to disk.

	:param str url: The URL to fetch the file from.
	:param str filename: The destination file to write the data to. When
		omitted, the base name of the URL path is used.
	"""
	# requirements os, shutil, urllib.parse, urllib.request
	if not filename:
		url_parts = urllib.parse.urlparse(url)
		filename = os.path.basename(url_parts.path)
	# Context managers guarantee the HTTP response is closed even when the
	# copy fails (the original leaked url_h on error).
	with urllib.request.urlopen(url) as url_h:
		with open(filename, 'wb') as file_h:
			shutil.copyfileobj(url_h, file_h)
	return
Example 14
Project: fetchLandsatSentinelFromGoogleCloud   Author: vascobnunes   File: fels.py    MIT License 6 votes vote down vote up
def download_metadata_file(url, outputdir, program):
    """Download and unzip the catalogue files.

    :param url: URL of the gzipped CSV index
    :param outputdir: directory receiving both the .csv.gz and the .csv
    :param program: label embedded in the index file names
    :return: path of the unzipped CSV index
    """
    zipped_index_path = os.path.join(outputdir, 'index_' + program + '.csv.gz')
    # Download only when the gzipped index is not already cached.
    if not os.path.isfile(zipped_index_path):
        if not os.path.exists(os.path.dirname(zipped_index_path)):
            os.makedirs(os.path.dirname(zipped_index_path))
        print("Downloading Metadata file...")
        # NOTE(review): the urlopen response is never closed explicitly —
        # consider a context manager.
        content = urlopen(url)
        with open(zipped_index_path, 'wb') as f:
            shutil.copyfileobj(content, f)
    index_path = os.path.join(outputdir, 'index_' + program + '.csv')
    # Unzip only when the plain CSV is not already present.
    if not os.path.isfile(index_path):
        print("Unzipping Metadata file...")
        with gzip.open(zipped_index_path) as gzip_index, open(index_path, 'wb') as f:
            shutil.copyfileobj(gzip_index, f)
    return index_path
Example 15
Project: text-classification-tensorflow   Author: yxtay   File: acl_imdb.py    MIT License 6 votes vote down vote up
def download_data(url="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
                  dest_dir="data"):
    """Download and extract the ACL IMDB sentiment dataset into *dest_dir*.

    Skips the download when the archive is already on disk and the
    extraction when the unpacked README already exists.
    """
    # prepare filename
    _, _, url_path, _, _ = urlsplit(url)
    filename = os.path.basename(url_path)
    dest = os.path.join(dest_dir, filename)
    make_dirs(dest_dir)

    # download tar.gz
    if not os.path.exists(dest):
        logger.info("downloading file: %s.", url)
        r = requests.get(url, stream=True)
        with open(dest, "wb") as f:
            shutil.copyfileobj(r.raw, f)
        logger.info("file downloaded: %s.", dest)

    # extract tar.gz
    if not os.path.exists(os.path.join(dest_dir, "aclImdb", "README")):
        tar = tarfile.open(dest, "r:gz")
        tar.extractall(dest_dir)
        tar.close()
        logger.info("file extracted.")
Example 16
Project: reacnetgenerator   Author: tongzhugroup   File: utils.py    GNU Lesser General Public License v3.0 6 votes vote down vote up
def download_file(urls, pathfilename, sha256):
    """Download *pathfilename* from the first reachable URL in *urls*.

    :param urls: a URL or list of URLs to try in order
    :param pathfilename: local destination path
    :param sha256: expected checksum, or None to skip verification
    :return: pathfilename
    :raises RuntimeError: if every URL fails
    """
    s = requests.Session()
    s.mount('http://', HTTPAdapter(max_retries=3))
    s.mount('https://', HTTPAdapter(max_retries=3))
    # download if not exists
    if os.path.isfile(pathfilename) and (checksha256(pathfilename, sha256) or sha256 is None):
        return pathfilename

    # from https://stackoverflow.com/questions/16694907
    for url in must_be_list(urls):
        logging.info(f"Try to download {pathfilename} from {url}")
        try:
            # Bug fix: s.get() itself raises on DNS failure/timeout; the
            # original only guarded the body copy, so a connection error
            # aborted the whole loop instead of trying the next URL.
            with s.get(url, stream=True) as r, open(pathfilename, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            break
        except requests.exceptions.RequestException as e:
            logging.warning(f"Request {pathfilename} Error.", exc_info=e)
    else:
        raise RuntimeError(f"Cannot download {pathfilename}.")

    return pathfilename
Example 17
Project: TornadoWeb   Author: VxCoder   File: api.py    Apache License 2.0 6 votes vote down vote up
def get_object_to_file(self, key, filename,
                           byte_range=None,
                           headers=None,
                           progress_callback=None):
        """Download an object to a local file.

        :param key: object (file) name
        :param filename: local file name; the parent directory must already
            exist and be writable
        :param byte_range: download range; see :ref:`byte_range`

        :param headers: HTTP headers
        :type headers: a dict; oss2.CaseInsensitiveDict is recommended

        :param progress_callback: user-supplied progress callback; see
            :ref:`progress_callback`

        :return: the result object; raises
            :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` if the object does
            not exist, and possibly other exceptions
        """
        with open(to_unicode(filename), 'wb') as f:
            result = self.get_object(key, byte_range=byte_range, headers=headers, progress_callback=progress_callback)
            shutil.copyfileobj(result, f)

            return result
Example 18
Project: SBpro   Author: PHIEZUKE   File: models.py    GNU General Public License v3.0 6 votes vote down vote up
def downloadFileURL(self, fileUrl, returnAs='path', saveAs=''):
        """Download *fileUrl*; return a path, a boolean, or the raw stream.

        :param fileUrl: URL fetched via the server session
        :param returnAs: one of 'path', 'bool' or 'bin'
        :param saveAs: destination path; a temp file is generated when empty
        :raises Exception: on an invalid returnAs value or non-200 response
        """
        if returnAs not in ['path','bool','bin']:
            raise Exception('Invalid returnAs value')
        if saveAs == '':
            saveAs = self.genTempFile()
        r = self.server.getContent(fileUrl)
        if r.status_code == 200:
            with open(saveAs, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            if returnAs == 'path':
                return saveAs
            elif returnAs == 'bool':
                return True
            elif returnAs == 'bin':
                # NOTE(review): r.raw has already been consumed by
                # copyfileobj above, so this likely returns an exhausted
                # stream — confirm callers expect that.
                return r.raw
        else:
            raise Exception('Download file failure.')
Example 19
Project: SBpro   Author: PHIEZUKE   File: object.py    GNU General Public License v3.0 6 votes vote down vote up
def downloadObjectMsg(self, messageId, returnAs='path', saveAs=''):
        """Download a message object by id; return a path, bool, or stream.

        :param messageId: id of the message object to fetch
        :param returnAs: one of 'path', 'bool' or 'bin'
        :param saveAs: destination path; a temp file is generated when empty
        :raises Exception: on an invalid returnAs value or non-200 response
        """
        if saveAs == '':
            saveAs = self.genTempFile('path')
        if returnAs not in ['path','bool','bin']:
            raise Exception('Invalid returnAs value')
        params = {'oid': messageId}
        url = self.server.urlEncode(self.server.LINE_OBS_DOMAIN, '/talk/m/download.nhn', params)
        r = self.server.getContent(url)
        if r.status_code == 200:
            with open(saveAs, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
            if returnAs == 'path':
                return saveAs
            elif returnAs == 'bool':
                return True
            elif returnAs == 'bin':
                # NOTE(review): r.raw was consumed by copyfileobj above, so
                # this likely returns an exhausted stream — confirm.
                return r.raw
        else:
            raise Exception('Download object failure.')
Example 20
Project: NordVPN-NetworkManager-Gui   Author: vfosterm   File: nord_nm_gui.py    GNU General Public License v3.0 5 votes vote down vote up
def get_ovpn(self):
        """
        Gets ovpn file from nord servers and saves it to a temporary location
        """
        # https://downloads.nordcdn.com/configs/files/ovpn_udp/servers/sg173.nordvpn.com.udp.ovpn
        self.ovpn_path = None
        ovpn_url = None
        udp_url = 'https://downloads.nordcdn.com/configs/files/ovpn_udp/servers/'
        tcp_url = 'https://downloads.nordcdn.com/configs/files/ovpn_tcp/servers/'
        udp_xor_url = 'https://downloads.nordcdn.com/configs/files/ovpn_xor_udp/servers/'
        tcp_xor_url = 'https://downloads.nordcdn.com/configs/files/ovpn_xor_tcp/servers/'

        # The original duplicated the whole download path for UDP and TCP;
        # pick the URL and filename suffix once, then run a single shared
        # download sequence.
        obfuscated = self.server_type_select.currentText() == 'Obfuscated Server'
        connection_type = self.connection_type_select.currentText()
        suffix = None
        if connection_type == 'UDP':
            ovpn_url = udp_xor_url if obfuscated else udp_url
            suffix = '.udp.ovpn'
        elif connection_type == 'TCP':
            ovpn_url = tcp_xor_url if obfuscated else tcp_url
            suffix = '.tcp.ovpn'

        if suffix is not None:
            filename = self.domain_list[self.server_list.currentRow()] + suffix
            ovpn_file = requests.get(ovpn_url + filename, stream=True)
            if ovpn_file.status_code == requests.codes.ok:
                self.ovpn_path = os.path.join(self.config_path, filename)
                with open(self.ovpn_path, 'wb') as out_file:
                    shutil.copyfileobj(ovpn_file.raw, out_file)
            else:
                self.statusbar.showMessage('Error fetching configuration files', 2000)

        self.server_list.setFocus()
Example 21
Project: pyblish-win   Author: pyblish   File: SimpleHTTPServer.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def copyfile(self, source, outputfile):
        """Stream every byte from one file object to another.

        *source* only needs a ``read()`` method and *outputfile* only a
        ``write()`` method. Subclasses would override this solely to tune
        the block size or rewrite newlines — note that the default server
        also routes binary data through here, so the default must stay a
        byte-for-byte copy.
        """
        shutil.copyfileobj(source, outputfile)
Example 22
Project: pyblish-win   Author: pyblish   File: tarfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
       If length is None, copy the entire content.
       Raises IOError if the source ends before length bytes were read.
    """
    if length == 0:
        return
    if length is None:
        # Unbounded copy: delegate to shutil.
        shutil.copyfileobj(src, dst)
        return

    BUFSIZE = 16 * 1024
    # Copy whole BUFSIZE blocks first, then the tail remainder; a short
    # read at any point means the source ended early.
    blocks, remainder = divmod(length, BUFSIZE)
    for b in xrange(blocks):  # xrange: this module targets Python 2
        buf = src.read(BUFSIZE)
        if len(buf) < BUFSIZE:
            raise IOError("end of file reached")
        dst.write(buf)

    if remainder != 0:
        buf = src.read(remainder)
        if len(buf) < remainder:
            raise IOError("end of file reached")
        dst.write(buf)
    return
Example 23
Project: pyblish-win   Author: pyblish   File: tarfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def addfile(self, tarinfo, fileobj=None):
        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
           given, tarinfo.size bytes are read from it and added to the archive.
           You can create TarInfo objects using gettarinfo().
           On Windows platforms, `fileobj' should always be opened with mode
           'rb' to avoid irritation about the file size.
        """
        self._check("aw")

        # Copy so the mutations below never affect the caller's TarInfo.
        tarinfo = copy.copy(tarinfo)

        # Write the member header and advance the archive offset.
        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
        self.fileobj.write(buf)
        self.offset += len(buf)

        # If there's data to follow, append it.
        if fileobj is not None:
            copyfileobj(fileobj, self.fileobj, tarinfo.size)
            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
            if remainder > 0:
                # Pad the final partial record with NULs to a full block.
                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
                blocks += 1
            self.offset += blocks * BLOCKSIZE

        self.members.append(tarinfo)
Example 24
Project: pyblish-win   Author: pyblish   File: tarfile.py    GNU Lesser General Public License v3.0 5 votes vote down vote up
def makefile(self, tarinfo, targetpath):
        """Create a regular file at *targetpath* from the archive member.

        The extracted member stream is always closed, even if the copy
        fails part-way.
        """
        stream = self.extractfile(tarinfo)
        try:
            with bltn_open(targetpath, "wb") as out:
                copyfileobj(stream, out)
        finally:
            stream.close()
Example 25
Project: BASS   Author: Cisco-Talos   File: bindiff.py    GNU General Public License v2.0 5 votes vote down vote up
def bindiff_pickle_export(self, sample, is_64_bit = True, timeout = None):
        """
        Load a sample into IDA Pro, perform autoanalysis and export a pickle file.
        :param sample: The sample's path
        :param is_64_bit: If the sample needs to be analyzed by the 64 bit version of IDA
        :param timeout: Timeout for the analysis in seconds
        :return: A tuple (BinExport file name, pickle file name). The files
        need to be deleted by the caller. Returns None on error.
        """

        data_to_send = {
            "timeout": timeout,
            "is_64_bit": is_64_bit}
        url = "%s/binexport_pickle" % next(self._urls)
        log.debug("curl -XPOST --data '%s' '%s'", json.dumps(data_to_send), url)
        response = requests.post(url, data = data_to_send, files = {os.path.basename(sample): open(sample, "rb")})
        if response.status_code == 200:
            handle_tar, path_tar = tempfile.mkstemp(suffix = ".tar.gz")
            with os.fdopen(handle_tar, "wb") as f:
                # Bug fix: the original used map(f.write, ...), which is lazy
                # on Python 3 and therefore wrote nothing; iterate explicitly.
                for chunk in response.iter_content(1024):
                    f.write(chunk)
            directory = tempfile.mkdtemp()
            subprocess.check_call(["tar", "xf", path_tar], cwd = directory)

            # Spool the two extracted artifacts into fresh temp files the
            # caller owns (and must delete).
            handle_bindiff, output_bindiff = tempfile.mkstemp(suffix = ".BinExport")
            with os.fdopen(handle_bindiff, "wb") as f:
                with open(os.path.join(directory, "output.BinExport"), "rb") as f2:
                    shutil.copyfileobj(f2, f)
            handle_pickle, output_pickle = tempfile.mkstemp(suffix = ".pickle")
            with os.fdopen(handle_pickle, "wb") as f:
                with open(os.path.join(directory, "output.pickle"), "rb") as f2:
                    shutil.copyfileobj(f2, f)
            os.unlink(path_tar)
            shutil.rmtree(directory)
            return output_bindiff, output_pickle
        else:
            log.error("Bindiff server responded with status code %d: %s", response.status_code, response.content)
            return None
Example 26
Project: Rackfocus   Author: Antrikshy   File: compilation.py    MIT License 5 votes vote down vote up
def fetch_datasets(self):
        """Download every registered dataset into the working directory.

        Iterates the direct subclasses of DatasetModel, instantiates each,
        and streams its download URL into the model's file name under
        ``self.working_dir``.
        """
        for model_class in DatasetModel.__subclasses__():
            model = model_class()
            output_file = os.path.join(self.working_dir, model.get_file_name())
            download_url = model.get_download_url()
            print("Downloading dataset: {}".format(output_file))
            with urllib.request.urlopen(download_url) as response, open(output_file, 'wb') as out:
                shutil.copyfileobj(response, out)
Example 27
Project: REMAKE   Author: PHIEZUKE   File: models.py    GNU General Public License v3.0 5 votes vote down vote up
def saveFile(self, path, raw):
        """Write the readable byte stream *raw* to *path* in binary mode."""
        with open(path, 'wb') as destination:
            shutil.copyfileobj(raw, destination)
Example 28
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: panos_import.py    MIT License 5 votes vote down vote up
def download_file(url):
    """Fetch *url* as a stream into a named temp file and return its path.

    The file is created with delete=False, so it survives the close and
    the caller is responsible for removing it.
    """
    response = requests.get(url, stream=True)
    tmp = tempfile.NamedTemporaryFile(prefix='ai', delete=False)
    shutil.copyfileobj(response.raw, tmp)
    tmp.close()

    return tmp.name
Example 29
Project: deep-learning-note   Author: wdxtub   File: utils.py    MIT License 5 votes vote down vote up
def download_one_file(download_url,
                      local_dest,
                      expected_byte=None,
                      unzip_and_remove=False):
    """
    Download the file from download_url into local_dest
    if the file doesn't already exists.
    If expected_byte is provided, check if
    the downloaded file has the same number of bytes.
    If unzip_and_remove is True, unzip the file and remove the zip file
    """
    if os.path.exists(local_dest) or os.path.exists(local_dest[:-3]):
        print('%s already exists' % local_dest)
    else:
        print('Downloading %s' % download_url)
        # urlretrieve writes directly to local_dest; its return value was
        # previously bound to an unused local, now dropped.
        urllib.request.urlretrieve(download_url, local_dest)
        file_stat = os.stat(local_dest)
        if expected_byte:
            if file_stat.st_size == expected_byte:
                print('Successfully downloaded %s' % local_dest)
                if unzip_and_remove:
                    # Decompress next to the archive (strip '.gz'), then
                    # delete the compressed original.
                    with gzip.open(local_dest, 'rb') as f_in, open(local_dest[:-3], 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)
                    os.remove(local_dest)
            else:
                print('The downloaded file has unexpected number of bytes')
Example 30
Project: video2commons   Author: toolforge   File: upload.py    GNU General Public License v3.0 5 votes vote down vote up
def handle_chunked(f, permpath, content_range):
    """Append one chunk of a resumable upload to *permpath*.

    :param f: file-like object with the chunk body
    :param permpath: path of the partially-assembled file
    :param content_range: HTTP Content-Range header value
    :return: ``('Continue', {'offset': size})`` while incomplete,
        ``('Success', {})`` once the declared total size is reached
    :raises RuntimeError: if more bytes than declared were written
    """
    try:
        content_range = RE_CONTENT_RANGE.match(content_range)
        assert content_range, 'Invalid content range!'

        # cr1 = range start, cr2 = range end, cr3 = declared total size.
        cr1, cr2, cr3 = [int(content_range.group(i)) for i in range(1, 4)]

        # NOTE(review): stat() here presumably returns the file size —
        # verify against the project helper.
        if os.path.isfile(permpath):
            size = stat(permpath)
        else:
            size = 0

        # The client must resume exactly where the server left off;
        # WrongOffset carries the expected offset back to the caller.
        if size != cr1:
            raise WrongOffset(size)

        with open(permpath, 'ab') as dest:
            shutil.copyfileobj(f, dest)

    except WrongOffset as e:
        size = e.offset
    else:
        size = stat(permpath)
    if size < cr3:
        return 'Continue', {'offset': size}
    elif size > cr3:
        raise RuntimeError('What?! Uploaded file is larger than '
                           'what it is supposed to be?')
    return 'Success', {}
Example 31
Project: google_streetview   Author: rrwen   File: helpers.py    MIT License 5 votes vote down vote up
def download(url, file_path):
  """Fetch *url* and write the decoded raw body to *file_path*.

  Does nothing when the response status is not 200.
  """
  response = requests.get(url, stream=True)
  if response.status_code != 200:
    return
  # Let urllib3 undo any content-encoding before the raw copy.
  response.raw.decode_content = True
  with open(file_path, 'wb') as destination:
    shutil.copyfileobj(response.raw, destination)
Example 32
Project: flasky   Author: RoseOu   File: util.py    MIT License 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Copy *instream* to the file *outfile*, creating parent dirs.

        :param instream: readable file-like source
        :param outfile: destination path (must not be a directory)
        :param encoding: when given, write text via codecs with that
            encoding; otherwise copy raw bytes
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        # dry_run still records the file as written, but skips the copy.
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)
Example 33
Project: CyberTK-Self   Author: CyberTKR   File: LineApi.py    GNU General Public License v2.0 5 votes vote down vote up
def sendVideoWithURL(self, to_, url):
        """Download a video from *url* and send it.

        :param to_: recipient id
        :param url: video url to download and send
        :raises Exception: if the download does not return HTTP 200
        """
        path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))

        r = requests.get(url, stream=True)
        if r.status_code == 200:
            # Bug fix: r.raw yields bytes, so the file must be opened in
            # binary mode — text mode ('w') raises TypeError on Python 3.
            with open(path, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            raise Exception('Download video failure.')

        try:
            self.sendVideo(to_, path)
        except Exception as e:
            raise (e)
Example 34
Project: CyberTK-Self   Author: CyberTKR   File: LineApi.py    GNU General Public License v2.0 5 votes vote down vote up
def sendAudioWithUrl(self, to_, url):
        """Download an audio file from *url* and send it.

        :param to_: recipient id
        :param url: audio url to download and send
        :raises Exception: if the download does not return HTTP 200
        """
        # Bug fixes: '%1' was a garbled conversion (sendVideoWithURL uses
        # '%i'), and the download must be written in binary mode because
        # r.raw yields bytes.
        path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))

        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(path, 'wb') as f:
                shutil.copyfileobj(r.raw, f)
        else:
            raise Exception('Download audio failure.')

        try:
            self.sendAudio(to_, path)
        except Exception as e:
            raise (e)
Example 35
Project: github-review-slack-notifier   Author: Gidgidonihah   File: octocats.py    MIT License 5 votes vote down vote up
def _retrieve_rss_file():
    """ Download the RSS file locally. """
    # Refresh only when the cached copy is considered stale by the helper.
    if _should_retrieve_rss_file():
        with urllib.request.urlopen('http://feeds.feedburner.com/Octocats') as response, open(RSS_FILE, 'wb') as feed:
            shutil.copyfileobj(response, feed)
Example 36
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: driver.py    Apache License 2.0 5 votes vote down vote up
def _generate_configdrive(self, instance, node, network_info,
                              extra_md=None, files=None):
        """Generate a config drive.

        :param instance: The instance object.
        :param node: The node object.
        :param network_info: Instance network information.
        :param extra_md: Optional, extra metadata to be added to the
                         configdrive.
        :param files: Optional, a list of paths to files to be added to
                      the configdrive.
        :returns: the gzip-compressed config drive image, base64-encoded.

        """
        if not extra_md:
            extra_md = {}

        i_meta = instance_metadata.InstanceMetadata(instance,
            content=files, extra_md=extra_md, network_info=network_info)

        # Build the raw drive image in one temp file, gzip it into a second,
        # then base64-encode the compressed result.
        with tempfile.NamedTemporaryFile() as uncompressed:
            with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb:
                cdb.make_drive(uncompressed.name)

            with tempfile.NamedTemporaryFile() as compressed:
                # compress config drive
                with gzip.GzipFile(fileobj=compressed, mode='wb') as gzipped:
                    uncompressed.seek(0)
                    shutil.copyfileobj(uncompressed, gzipped)

                # base64 encode config drive
                compressed.seek(0)
                return base64.b64encode(compressed.read())
Example 37
Project: Trusted-Platform-Module-nova   Author: BU-NU-CLOUD-SP16   File: utils.py    Apache License 2.0 5 votes vote down vote up
def stream_to(self, target_file):
        """Stream the current archive member's bytes into *target_file*.

        The tar archive is opened lazily on first use and its first member
        becomes the current one; the archive is closed after the copy.
        """
        archive = self._tar_file
        if archive is None:
            archive = self._as_tarfile()
            self._tar_file = archive
            self._tar_info = archive.next()
        member = archive.extractfile(self._tar_info)
        shutil.copyfileobj(member, target_file)
        archive.close()
Example 38
Project: sic   Author: Yanixos   File: util.py    GNU General Public License v3.0 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Write the contents of *instream* to the path *outfile*.

        When *encoding* is None the destination is opened in binary mode;
        otherwise it is opened as a text file via codecs with that encoding.
        Respects dry-run mode, and always records the path as written.
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                sink = open(outfile, 'wb')
            else:
                sink = codecs.open(outfile, 'w', encoding=encoding)
            with sink:
                shutil.copyfileobj(instream, sink)
        self.record_as_written(outfile)
Example 39
Project: sic   Author: Yanixos   File: tarfile.py    GNU General Public License v3.0 5 votes vote down vote up
def makefile(self, tarinfo, targetpath):
        """Make a file called targetpath.

        Copies the member's data from the archive stream into a newly
        created file.  Sparse members are reassembled segment by segment;
        regular members are copied in a single pass.
        """
        source = self.fileobj
        # Position the archive stream at the start of this member's data.
        source.seek(tarinfo.offset_data)
        bufsize = self.copybufsize
        with bltn_open(targetpath, "wb") as target:
            if tarinfo.sparse is not None:
                # Write each data segment at its recorded offset, then pad
                # the file out to its full (logical) sparse size.
                for offset, size in tarinfo.sparse:
                    target.seek(offset)
                    copyfileobj(source, target, size, ReadError, bufsize)
                target.seek(tarinfo.size)
                target.truncate()
            else:
                copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
Example 40
Project: twitter-image-downloader   Author: morinokami   File: twt_img.py    MIT License 5 votes vote down vote up
def save_image(self, image, path, timestamp, size="large"):
        """Download and save an image to path.

        Args:
            image: The url of the image.
            path: The directory where the image will be saved.
            timestamp: The time that the image was uploaded.
                It is used for naming the image.
            size: Which size of images to download.
        """

        def print_status(s):
            # One-line progress spinner: clear the terminal line, then
            # redraw with a glyph rotated by the running download count.
            import sys

            sys.stdout.write(u"\u001b[1K")
            print("\r{} {}".format(["-", "\\", "|", "/"][self.count % 4], s), end="")

        if image:
            # image's path with a new name
            ext = os.path.splitext(image)[1]
            name = timestamp + ext
            save_dest = os.path.join(path, name)

            # save the image in the specified directory if
            # it has not already been downloaded
            if not (os.path.exists(save_dest)):

                # Twitter serves size variants via a ':<size>' URL suffix.
                r = requests.get(image + ":" + size, stream=True)
                if r.status_code == 200:
                    with open(save_dest, "wb") as f:
                        # undo transfer-encoding so raw bytes are the image
                        r.raw.decode_content = True
                        shutil.copyfileobj(r.raw, f)
                    self.count += 1
                    print_status("{} saved".format(name))

            else:
                print_status("Skipping {}: already downloaded".format(name))
Example 41
Project: roberta   Author: dreamer   File: fakescripteval.py    GNU General Public License v2.0 5 votes vote down vote up
def download_item(i, num, name, desc):
    """Fetch the file described by *desc* into the XDG cache, unless cached.

    Also writes an '<i>/<num>: <txt>' progress message to desc.txt so a
    UI can display download status while the transfer runs.
    """
    target = xdg.cached_file(name)
    if os.path.isfile(target):
        return
    log('downloading', desc['url'], 'to', target)
    # TODO use runtime dir instead of cache here
    with open(xdg.cached_file('desc.txt'), 'w') as progress:
        progress.write('{}/{}: {}'.format(i, num, desc['txt']))
    with urllib.request.urlopen(desc['url']) as resp, open(target, 'wb') as out:
        shutil.copyfileobj(resp, out)
Example 42
Project: AshsSDK   Author: thehappydinoa   File: util.py    MIT License 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Write the contents of *instream* to the path *outfile*.

        When *encoding* is None the destination is opened in binary mode;
        otherwise it is opened as a text file via codecs with that encoding.
        Respects dry-run mode, and always records the path as written.
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                sink = open(outfile, 'wb')
            else:
                sink = codecs.open(outfile, 'w', encoding=encoding)
            with sink:
                shutil.copyfileobj(instream, sink)
        self.record_as_written(outfile)
Example 43
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 5 votes vote down vote up
def _merge_file_contents(input_files, output_file):
    for file_name in input_files:
        with open(file_name, 'r') as input_file:
            shutil.copyfileobj(input_file, output_file)
        output_file.flush() 
Example 44
Project: mx   Author: graalvm   File: mx.py    GNU General Public License v2.0 5 votes vote down vote up
def prePush(self, f):
        """Gzip-compress the distribution file *f* before pushing.

        Prefers the external gzip binary when one is available, falling
        back to Python's gzip module otherwise.  Returns the path of the
        compressed '<f>.gz' file.
        """
        tgz = f + '.gz'
        logv('Compressing {}...'.format(f))
        if AbstractTARDistribution._has_gzip():
            with open(tgz, 'wb') as tar:
                # force, quiet, cat to stdout
                run([AbstractTARDistribution._gzip_binary(), '-f', '-q', '-c', f], out=tar)
        else:
            # Pure-Python fallback: stream the file through gzip.
            with gzip.open(tgz, 'wb') as gz, open(f, 'rb') as tar:
                shutil.copyfileobj(tar, gz)
        return tgz
Example 45
Project: PyGallica   Author: ian-nai   File: iiif_api.py    GNU General Public License v3.0 5 votes vote down vote up
def iiif(id, region, size, rotation, quality, format):
        """Fetch an image from Gallica's IIIF endpoint and save it locally.

        The image is requested as
        <base>/<id>/<region>/<size>/<rotation>/<quality>.<format> and saved
        to '<id>.<format>'.  Gallica ark ids contain slashes, so any
        intermediate directories in that name are created first.
        """
        IIIF_BASEURL = 'https://gallica.bnf.fr/iiif/ark:/'

        url = "".join([IIIF_BASEURL, id, '/', region, '/', size, '/', rotation, '/', quality, '.', format])
        # Fixed: the original used a Python 2 print statement ("print url"),
        # a syntax error under Python 3.
        print(url)
        filename = "".join([id, '.', format])
        dirname = os.path.dirname(filename)
        # Guard against dirname == '' (id without a slash): os.makedirs('')
        # would raise FileNotFoundError.
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        response = requests.get(url, stream=True)
        with open(filename, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
Example 46
Project: PyGallica   Author: ian-nai   File: document_api.py    GNU General Public License v3.0 5 votes vote down vote up
def simple_images(id, res):
        """Download a Gallica document image at resolution *res*.

        The response body is streamed to 'simple_image.jpg' in the current
        working directory.
        """
        IMAGES_BASEURL = 'https://gallica.bnf.fr/ark:/'

        url = "".join([IMAGES_BASEURL, id, '/', res])
        # Fixed: the original used a Python 2 print statement ("print url"),
        # a syntax error under Python 3.
        print(url)

        response = requests.get(url, stream=True)
        with open('simple_image.jpg', 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
Example 47
Project: botbuilder-python   Author: microsoft   File: bidaf_model_runtime.py    MIT License 5 votes vote down vote up
def init_bidaf(bidaf_model_dir: str, download_ntlk_punkt: bool = False) -> bool:
        """Ensure the BiDAF model directory exists and bidaf.onnx is present.

        :param bidaf_model_dir: directory that should hold bidaf.onnx (and,
            optionally, the NLTK punkt data).
        :param download_ntlk_punkt: also fetch the NLTK 'punkt' tokenizer.
        :return: True once setup has completed.
        """
        if os.path.isdir(bidaf_model_dir):
            print("bidaf model directory already present..", file=sys.stderr)
        else:
            print("Creating bidaf model directory..", file=sys.stderr)
            os.makedirs(bidaf_model_dir, exist_ok=True)

        # Download Punkt Sentence Tokenizer
        # NOTE(review): downloaded twice — into the model dir and into the
        # default NLTK location; presumably deliberate, worth confirming.
        if download_ntlk_punkt:
            nltk.download("punkt", download_dir=bidaf_model_dir)
            nltk.download("punkt")

        # Download bidaf onnx model
        onnx_model_file = os.path.abspath(os.path.join(bidaf_model_dir, "bidaf.onnx"))

        print(f"Checking file {onnx_model_file}..", file=sys.stderr)
        if os.path.isfile(onnx_model_file):
            print("bidaf.onnx downloaded already!", file=sys.stderr)
        else:
            print("Downloading bidaf.onnx...", file=sys.stderr)
            response = requests.get(
                "https://onnxzoo.blob.core.windows.net/models/opset_9/bidaf/bidaf.onnx",
                stream=True,
            )
            # Stream the body straight to disk; decode_content undoes any
            # transfer-encoding so the saved file is the raw model.
            with open(onnx_model_file, "wb") as f:
                response.raw.decode_content = True
                shutil.copyfileobj(response.raw, f)
        return True
Example 48
Project: Telegram_upload   Author: itspooya   File: downloader.py    GNU General Public License v3.0 5 votes vote down vote up
def download(url):
    """Download *url* into the current directory and return the local name.

    The filename comes from the Content-Disposition header when present,
    otherwise from the last path segment of the URL.  Request errors are
    printed rather than raised.
    """
    # Initialized before the try block: in the original, an exception from
    # requests.get left 'fname' unbound, so 'return fname' raised NameError.
    fname = ''
    try:
        with requests.get(url, stream=True) as r:

            if "Content-Disposition" in r.headers.keys():
                fname = re.findall("filename=(.+)", r.headers["Content-Disposition"])[0]
            else:
                fname = url.split("/")[-1]
            with open("{}".format(fname), "wb") as f:
                shutil.copyfileobj(r.raw, f)
    except RequestException as e:
        print(e)
    return fname
Example 49
Project: JJMumbleBot   Author: DuckBoss   File: image_helper.py    GNU General Public License v3.0 5 votes vote down vote up
def download_image_requests(img_url):
    """Download *img_url* and save it as image.<ext>.

    The temporary image directory is cleared first; any non-200 response
    is reported via debug_print.
    """
    utils.clear_directory(utils.get_temporary_img_dir())
    # NOTE(review): assumes the URL ends in '.<ext>' with no query string.
    img_ext = img_url.rsplit('.', 1)[1]
    s = requests.Session()
    # Fixed: stream=True is required here — without it requests consumes the
    # body eagerly and r.raw yields no data to copyfileobj below.
    r = s.get(img_url, headers={'User-Agent': 'Mozilla/5.0'}, stream=True)
    if r.status_code == 200:
        with open(f"image.{img_ext}", 'wb') as f:
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        debug_print(f"Downloaded image from: {img_url}")
    else:
        # NOTE(review): message says 403 but fires for any non-200 status.
        debug_print(f"403 Error! - {img_url}")
Example 50
Project: remixt   Author: amcpherson   File: utils.py    MIT License 5 votes vote down vote up
def merge_files(output_filename, *input_filenames):
    """Concatenate the text contents of *input_filenames* into *output_filename*."""
    with open(output_filename, 'w') as sink:
        for name in input_filenames:
            with open(name, 'r') as chunk:
                sink.write(chunk.read())
Example 51
Project: myenglishcloud   Author: johncadigan   File: cache_functions.py    MIT License 5 votes vote down vote up
def replace_image(request, image, imageid, situation):
	"""Regenerate cached image files for an uploaded replacement picture.

	Depending on *situation* ('lesson', 'test' or 'profile') the uploaded
	file is thumbnailed to the appropriate size(s) and saved under the
	existing picture's name in the picture directory.  Returns the
	picture row id.
	"""
	picture = DBSession.query(Picture).filter_by(id=imageid).first()
	input_file = image.file
	DBSession.flush()
	if situation == 'lesson':
		pic = Image.open(input_file)
		pic.thumbnail((150,150), Image.ANTIALIAS)
		file_path = os.path.join(picture_directory, '{0}.jpeg'.format(picture.name))
		pic.save(file_path, 'jpeg')
	if situation == 'test':
		pic = Image.open(input_file)
		pic.thumbnail((300,300), Image.ANTIALIAS)
		file_path = os.path.join(picture_directory, '{0}.jpeg'.format(picture.name))
		pic.save(file_path, 'jpeg')
		# Also store the raw upload bytes as '<name>.jpg'.
		# NOTE(review): input_file was already read by Image.open above;
		# this copy may be empty unless the stream is rewound — verify.
		filename = '{0}.jpg'.format(picture.name)
		file_path = os.path.join(picture_directory, filename)
		with open(file_path, 'wb') as output_file:
			shutil.copyfileobj(input_file, output_file)
	if situation =='profile':
		pic = Image.open(input_file)
		pic.thumbnail((128,128), Image.ANTIALIAS)
		file_path = os.path.join(picture_directory, '{0}.thumb'.format(picture.name))
		pic.save(file_path, 'jpeg')
		pic.thumbnail((50,50), Image.ANTIALIAS)
		file_path = os.path.join(picture_directory, '{0}.thumbnail'.format(picture.name))
		pic.save(file_path, 'jpeg')
	return picture.id
Example 52
Project: jawfish   Author: war-and-code   File: tarfile.py    MIT License 5 votes vote down vote up
def makefile(self, tarinfo, targetpath):
        """Make a file called targetpath.

        Copies the member's data from the archive stream into a newly
        created file, handling sparse members segment by segment.
        """
        source = self.fileobj
        # Position the archive stream at the start of this member's data.
        source.seek(tarinfo.offset_data)
        with bltn_open(targetpath, "wb") as target:
            if tarinfo.sparse is not None:
                # Write each data segment at its recorded offset.
                for offset, size in tarinfo.sparse:
                    target.seek(offset)
                    copyfileobj(source, target, size)
            else:
                copyfileobj(source, target, tarinfo.size)
            # Pad/trim the file to the member's full logical size (matters
            # for sparse members whose final hole reaches the end).
            target.seek(tarinfo.size)
            target.truncate()
Example 53
Project: jawfish   Author: war-and-code   File: zipfile.py    MIT License 5 votes vote down vote up
def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.

           Returns the normalized path of the created file or directory.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)

        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        invalid_path_parts = ('', os.path.curdir, os.path.pardir)
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                                   if x not in invalid_path_parts)
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            arcname = self._sanitize_windows_name(arcname, os.path.sep)

        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        # A trailing slash marks a directory entry: just ensure it exists.
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        # Regular member: stream the (decrypted/decompressed) archive data
        # straight to disk.
        with self.open(member, pwd=pwd) as source, \
             open(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)

        return targetpath
Example 54
Project: Repobot   Author: Desgard   File: util.py    MIT License 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Write the contents of *instream* to the path *outfile*.

        When *encoding* is None the destination is opened in binary mode;
        otherwise it is opened as a text file via codecs with that encoding.
        Respects dry-run mode, and always records the path as written.
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                sink = open(outfile, 'wb')
            else:
                sink = codecs.open(outfile, 'w', encoding=encoding)
            with sink:
                shutil.copyfileobj(instream, sink)
        self.record_as_written(outfile)
Example 55
Project: Repobot   Author: Desgard   File: server.py    MIT License 5 votes vote down vote up
def copyfile(self, source, outputfile):
        """Stream every byte from *source* into *outputfile*.

        *source* only needs a read() method and *outputfile* a write()
        method.  Copying happens in fixed-size chunks so arbitrarily large
        payloads never have to fit in memory at once; override this to
        change the chunk size or rewrite newlines.
        """
        while True:
            chunk = source.read(64 * 1024)
            if not chunk:
                break
            outputfile.write(chunk)
Example 56
Project: auth0-cli-utilities   Author: dmark   File: list_metadata_fields.py    MIT License 5 votes vote down vote up
def main():
    """Export all users via an Auth0 bulk-export job, then print the sorted
    set of metadata field names found across user_metadata/app_metadata."""

    auth0 = connect_to_auth0()

    # Kick off a bulk user-export job and poll until it completes.
    export_job = {}
    export_job_id = auth0.jobs.export_users(export_job)['id']

    while auth0.jobs.get(export_job_id)['status'] != 'completed':
        time.sleep(5)

    # Download the gzipped CSV export produced by the job.
    export_job_output_dir = './output'
    export_job_output_gz_filename = 'export.csv.gz'
    export_job_output_gz_filepath = (export_job_output_dir +
                                     '/' + export_job_output_gz_filename)
    urllib.request.urlretrieve(auth0.jobs.get(export_job_id)['location'],
                               export_job_output_gz_filepath)

    # Decompress the export to a plain CSV on disk.
    export_job_output_csv_filename = 'export.csv'
    export_job_output_csv_filepath = (export_job_output_dir +
                                      '/' + export_job_output_csv_filename)
    with gzip.open(export_job_output_gz_filepath, 'rb') as gz_file:
        with open(export_job_output_csv_filepath, 'wb') as csv_file:
            shutil.copyfileobj(gz_file, csv_file)

    users = []

    # Re-fetch each exported user, requesting only the metadata fields.
    # NOTE(review): assumes column 0 of the CSV is the user id — verify.
    with open(export_job_output_csv_filepath, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for user in csv_reader:
            users.append(auth0.users.get(user[0],
                                         fields=['user_metadata',
                                                 'app_metadata'],
                                         include_fields=True))
            time.sleep(1)  # throttle to respect the management API rate limit

    for s in sorted(walk_keys(users)):
        print(s)
Example 57
Project: auth0-cli-utilities   Author: dmark   File: db_backup.py    MIT License 5 votes vote down vote up
def main():
    """Export all users via an Auth0 bulk-export job, re-fetch each full user
    record, and dump them to stdout as pretty-printed JSON."""

    auth0 = connect_to_auth0()

    # Kick off a bulk user-export job and poll until it completes.
    export_job = {}
    export_job_id = auth0.jobs.export_users(export_job)['id']

    while auth0.jobs.get(export_job_id)['status'] != 'completed':
        time.sleep(5)

    # Download the gzipped CSV export produced by the job.
    export_job_output_dir = './output'
    export_job_output_gz_filename = 'export.csv.gz'
    export_job_output_gz_filepath = (export_job_output_dir +
                                     '/' + export_job_output_gz_filename)
    urllib.request.urlretrieve(auth0.jobs.get(export_job_id)['location'],
                               export_job_output_gz_filepath)

    # Decompress the export to a plain CSV on disk.
    export_job_output_csv_filename = 'export.csv'
    export_job_output_csv_filepath = (export_job_output_dir +
                                      '/' + export_job_output_csv_filename)
    with gzip.open(export_job_output_gz_filepath, 'rb') as gz_file:
        with open(export_job_output_csv_filepath, 'wb') as csv_file:
            shutil.copyfileobj(gz_file, csv_file)

    users = []

    # NOTE(review): assumes column 0 of the CSV is the user id — verify.
    with open(export_job_output_csv_filepath, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for user in csv_reader:
            users.append(auth0.users.get(user[0]))
            time.sleep(1)  # throttle to respect the management API rate limit

    print(json.dumps(users, sort_keys=True, indent=2, separators=(',', ':')))
Example 58
Project: fetchLandsatSentinelFromGoogleCloud   Author: vascobnunes   File: fels.py    MIT License 5 votes vote down vote up
def download_file(url, destination_filename):
    """Stream *url* to *destination_filename* using requests.

    NOTE(review): the original docstring claimed pycurl; this uses requests.
    The body is copied from r.raw without decode_content, so a
    content-encoded (gzip/deflate) response would be saved still
    compressed — confirm the served files are not content-encoded.
    """
    with requests.get(url, stream=True) as r:
        with open(destination_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
Example 59
Project: chattR   Author: patrickstocklin   File: util.py    GNU General Public License v2.0 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Write the contents of *instream* to the path *outfile*.

        When *encoding* is None the destination is opened in binary mode;
        otherwise it is opened as a text file via codecs with that encoding.
        Respects dry-run mode, and always records the path as written.
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                sink = open(outfile, 'wb')
            else:
                sink = codecs.open(outfile, 'w', encoding=encoding)
            with sink:
                shutil.copyfileobj(instream, sink)
        self.record_as_written(outfile)
Example 60
Project: chattR   Author: patrickstocklin   File: archive.py    GNU General Public License v2.0 5 votes vote down vote up
def extract(self, to_path):
        """Extract the wrapped tar archive under *to_path*.

        Strips one shared leading directory when every member has it, skips
        the 'pax_global_header' pseudo-member, and reports (rather than
        raises on) members the tarfile module cannot extract.
        """
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # Fixed: 'extracted' must be pre-bound — in the original, an
                # exception from extractfile() left it unbound and the
                # finally clause raised NameError instead of the report.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()
Example 61
Project: instaloader   Author: instaloader   File: instaloader.py    MIT License 5 votes vote down vote up
def save_caption(self, filename: str, mtime: datetime, caption: str) -> None:
        """Updates picture caption / Post metadata info.

        Writes *caption* to '<filename>.txt'.  An existing, unchanged
        caption is left alone; a changed one is rotated away to
        '<name>_old_NN<ext>' before the new text is written.  The file's
        mtime is set to the post's timestamp.
        """
        def _elliptify(caption):
            # Short single-line preview of the caption for log output.
            pcaption = caption.replace('\n', ' ').strip()
            return '[' + ((pcaption[:29] + u"\u2026") if len(pcaption) > 31 else pcaption) + ']'
        filename += '.txt'
        caption += '\n'
        pcaption = _elliptify(caption)
        bcaption = caption.encode("UTF-8")
        with suppress(FileNotFoundError):
            with open(filename, 'rb') as file:
                file_caption = file.read()
            # Compare with normalized line endings so a CRLF/LF difference
            # alone does not count as a caption change.
            if file_caption.replace(b'\r\n', b'\n') == bcaption.replace(b'\r\n', b'\n'):
                try:
                    self.context.log(pcaption + ' unchanged', end=' ', flush=True)
                except UnicodeEncodeError:
                    self.context.log('txt unchanged', end=' ', flush=True)
                return None
            else:
                def get_filename(index):
                    # Index 0 is the live file; higher indices are rotated
                    # backups named '<name>_old_NN<ext>'.
                    return filename if index == 0 else '{0}_old_{2:02}{1}'.format(*os.path.splitext(filename), index)

                # Shift every existing backup up one slot, freeing index 0.
                i = 0
                while os.path.isfile(get_filename(i)):
                    i = i + 1
                for index in range(i, 0, -1):
                    os.rename(get_filename(index - 1), get_filename(index))
                try:
                    self.context.log(_elliptify(file_caption.decode("UTF-8")) + ' updated', end=' ', flush=True)
                except UnicodeEncodeError:
                    self.context.log('txt updated', end=' ', flush=True)
        try:
            self.context.log(pcaption, end=' ', flush=True)
        except UnicodeEncodeError:
            self.context.log('txt', end=' ', flush=True)
        with open(filename, 'wb') as text_file:
            shutil.copyfileobj(BytesIO(bcaption), text_file)
        # Keep atime current but pin mtime to the post's timestamp.
        os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
Example 62
Project: instaloader   Author: instaloader   File: instaloader.py    MIT License 5 votes vote down vote up
def save_location(self, filename: str, location: PostLocation, mtime: datetime) -> None:
        """Write the post's location (name plus a Google Maps link) to disk.

        The file is named '<filename>_location.txt' and its mtime is pinned
        to the post's timestamp so it sorts with the rest of the download.
        """
        target = filename + '_location.txt'
        maps_link = "https://maps.google.com/maps?q={0},{1}&ll={0},{1}\n".format(
            location.lat, location.lng)
        payload = location.name + "\n" + maps_link
        with open(target, 'wb') as text_file:
            text_file.write(payload.encode())
        os.utime(target, (datetime.now().timestamp(), mtime.timestamp()))
        self.context.log('geo', end=' ', flush=True)
Example 63
Project: instaloader   Author: instaloader   File: instaloadercontext.py    MIT License 5 votes vote down vote up
def write_raw(self, resp: Union[bytes, requests.Response], filename: str) -> None:
        """Persist *resp* to *filename*.

        Accepts either a raw bytes payload or a streaming requests
        Response; for the latter the undecoded body stream is copied
        straight to disk.

        .. versionadded:: 4.2.1"""
        self.log(filename, end=' ', flush=True)
        with open(filename, 'wb') as sink:
            if not isinstance(resp, requests.Response):
                sink.write(resp)
            else:
                shutil.copyfileobj(resp.raw, sink)
Example 64
Project: TornadoWeb   Author: VxCoder   File: resumable.py    Apache License 2.0 5 votes vote down vote up
def __download_part(self, part):
        """Fetch one byte-range *part* of the remote object and write it
        into the temp file at that part's offset.

        Conditional headers (etag / unmodified-since) make the request fail
        if the remote object changed after this resumable download started.
        """
        self._report_progress(self.__finished_size)

        with open(self.__tmp_file, 'rb+') as f:
            f.seek(part.start, os.SEEK_SET)

            headers = {'If-Match': self.objectInfo.etag,
                       'If-Unmodified-Since': utils.http_date(self.objectInfo.mtime)}
            # byte_range is inclusive on both ends, hence part.end - 1.
            result = self.bucket.get_object(self.key, byte_range=(part.start, part.end - 1), headers=headers)
            shutil.copyfileobj(result, f)

        self.__finish_part(part)
Example 65
Project: sagemaker-xgboost-container   Author: aws   File: sagemaker_pipe.py    Apache License 2.0 5 votes vote down vote up
def local_retriever(src, sink):
    """Stream a local file — or every regular file under a directory — into *sink*.

    A single file has its bytes copied directly; a directory is walked and
    each regular file found is appended to *sink* in walk order.  Special
    files (sockets, fifos, ...) are skipped.
    """
    if os.path.isfile(src):
        logging.debug('streaming file: {}'.format(src))
        with open(src, 'rb') as fileobj:
            shutil.copyfileobj(fileobj, sink)
        return
    for root, _dirs, files in os.walk(src):
        logging.debug('file list: {}'.format(files))
        for name in files:
            src_path = root + '/' + name
            logging.debug('streaming file: {}'.format(src_path))
            if os.path.isfile(src_path):   # ignore special files
                with open(src_path, 'rb') as fileobj:
                    shutil.copyfileobj(fileobj, sink)
Example 66
Project: sagemaker-xgboost-container   Author: aws   File: sagemaker_pipe.py    Apache License 2.0 5 votes vote down vote up
def gunzip(src_retriever, tmp_path, sink):
    """Fetch a gzipped payload via *src_retriever* and decompress it into *sink*.

    The compressed bytes are first staged at *tmp_path*; the file is then
    re-opened with gzip and its inflated content written to *sink*.
    """
    with open(tmp_path, 'wb') as staging:
        src_retriever(staging)
    with gzip.open(tmp_path, 'rb') as inflated:
        sink.write(inflated.read())
Example 67
Project: Flask_Blog   Author: sugarguo   File: util.py    GNU General Public License v3.0 5 votes vote down vote up
def copy_stream(self, instream, outfile, encoding=None):
        """Write the contents of *instream* to the path *outfile*.

        When *encoding* is None the destination is opened in binary mode;
        otherwise it is opened as a text file via codecs with that encoding.
        Respects dry-run mode, and always records the path as written.
        """
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                sink = open(outfile, 'wb')
            else:
                sink = codecs.open(outfile, 'w', encoding=encoding)
            with sink:
                shutil.copyfileobj(instream, sink)
        self.record_as_written(outfile)
Example 68
Project: saga   Author: sagasurvey   File: external.py    MIT License 5 votes vote down vote up
def run_casjobs_with_sciserver(
        query, output_path, compress=True, context="DR14", username=None, password=None
    ):
        """
        Run a single casjobs and download casjobs output using SciServer

        Parameters
        ----------
        query : str, output from construct_query
        output_path : str
        compress : bool, optional
        context : str, optional
        username : str, optional
        password : str, optional

        Raises
        ------
        ValueError
            if SciServer is not installed or no credentials are available.

        Notes
        -----
        Follow these instructions to use `run_casjobs_with_sciserver`

        1. Install SciServer:
           follow the instruction at https://github.com/sciserver/SciScript-Python

        2. Register an account at https://portal.sciserver.org/login-portal/Account/Register

        3. Edit your `.bashrc`:
            export SCISERVER_USER='username'
            export SCISERVER_PASS='password'
        """
        # Credentials fall back to the environment variables described above.
        username = username or os.getenv("SCISERVER_USER")
        password = password or os.getenv("SCISERVER_PASS")
        if not (_HAS_SCISERVER_ and username and password):
            raise ValueError("You are not setup to run casjobs with SciServer")
        SciServer.Authentication.login(username, password)
        # Execute the query server-side and stream the FITS result to disk,
        # gzip-compressing on the fly when requested.
        r = SciServer.CasJobs.executeQuery(query, context=context, format="fits")
        file_open = gzip.open if compress else open
        with file_open(output_path, "wb") as f_out:
            shutil.copyfileobj(r, f_out)
Example 69
Project: LaserTOF   Author: kyleuckert   File: _datasource.py    MIT License 5 votes vote down vote up
def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache.

        Returns the absolute cache path of the copied file.
        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        if sys.version_info[0] >= 3:
            from urllib.request import urlopen
            from urllib.error import URLError
        else:
            from urllib2 import urlopen
            from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            # Remote source: download into the cache location.
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    shutil.copyfileobj(openedurl, f)
                finally:
                    f.close()
                    openedurl.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            # Local source: copy the file directly into the cache.
            shutil.copyfile(path, upath)
        return upath
Example 70
Project: pyblish-win   Author: pyblish   File: zipfile.py    GNU Lesser General Public License v3.0 4 votes vote down vote up
def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.

           Returns the normalized path of the created file or directory.
           (Python 2 code: uses the 'unicode' and 'file' builtins.)
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)

        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                    if x not in ('', os.path.curdir, os.path.pardir))
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            illegal = ':<>|"?*'
            if isinstance(arcname, unicode):
                table = {ord(c): ord('_') for c in illegal}
            else:
                table = string.maketrans(illegal, '_' * len(illegal))
            arcname = arcname.translate(table)
            # remove trailing dots
            arcname = (x.rstrip('.') for x in arcname.split(os.path.sep))
            arcname = os.path.sep.join(x for x in arcname if x)

        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        # A trailing slash marks a directory entry: just ensure it exists.
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        # Regular member: stream the archive data straight to disk.
        with self.open(member, pwd=pwd) as source, \
             file(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)

        return targetpath
Example 71
Project: Ansible-Example-AB2018   Author: umit-ozturk   File: get_url.py    MIT License 4 votes vote down vote up
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''):
    """
    Download data from the url and store in a temporary file.

    :param module: AnsibleModule instance; used for check mode, tmpdir
        and for reporting results (fail_json/exit_json terminate the run).
    :param url: URL to fetch.
    :param dest: final destination path (only echoed back in results).
    :param use_proxy: whether proxy settings should be honoured.
    :param last_mod_time: If-Modified-Since value; a 304 reply exits early.
    :param force: force re-download even when unmodified.
    :param timeout: request timeout in seconds.
    :param headers: optional extra request headers.
    :param tmp_dest: existing directory to hold the temporary file;
        defaults to the module's tmpdir.

    Return (tempfile, info about the request)
    """
    # In check mode a HEAD request suffices -- only metadata is needed.
    if module.check_mode:
        method = 'HEAD'
    else:
        method = 'GET'

    rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers, method=method)

    # 304 Not Modified: nothing to download, report unchanged.
    if info['status'] == 304:
        module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))

    # Exceptions in fetch_url may result in a status -1, this ensures a proper error to the user in all cases
    if info['status'] == -1:
        module.fail_json(msg=info['msg'], url=url, dest=dest)

    # file:/ URLs and successful ftp:/ transfers do not carry HTTP codes.
    if info['status'] != 200 and not url.startswith('file:/') and not (url.startswith('ftp:/') and info.get('msg', '').startswith('OK')):
        module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)

    # create a temporary file and copy content to do checksum-based replacement
    if tmp_dest:
        # tmp_dest should be an existing dir
        tmp_dest_is_dir = os.path.isdir(tmp_dest)
        if not tmp_dest_is_dir:
            if os.path.exists(tmp_dest):
                module.fail_json(msg="%s is a file but should be a directory." % tmp_dest)
            else:
                module.fail_json(msg="%s directory does not exist." % tmp_dest)
    else:
        tmp_dest = getattr(module, 'tmpdir', None)

    fd, tempname = tempfile.mkstemp(dir=tmp_dest)

    try:
        # The with-block guarantees the descriptor is closed even when
        # copyfileobj raises; the original leaked it, which also makes
        # the os.remove below fail on Windows (file still open).
        with os.fdopen(fd, 'wb') as f:
            shutil.copyfileobj(rsp, f)
    except Exception as e:
        os.remove(tempname)
        module.fail_json(msg="failed to create temporary content file: %s" % to_native(e), exception=traceback.format_exc())
    rsp.close()
    return tempname, info 
Example 72
Project: Parallel.GAMIT   Author: demiangomez   File: zipfile.py    GNU General Public License v3.0 4 votes vote down vote up
def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.

        Returns the path of the extracted file (or created directory).

        NOTE: this is Python 2 code -- it relies on the `unicode`
        builtin, `string.maketrans` and the `file()` constructor, none
        of which exist on Python 3.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)

        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        # (This is the zip-slip / path-traversal defence: '..' components
        # are dropped so a member can never escape targetpath.)
        arcname = os.path.splitdrive(arcname)[1]
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                    if x not in ('', os.path.curdir, os.path.pardir))
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            illegal = ':<>|"?*'
            if isinstance(arcname, unicode):
                # str.translate on unicode takes an ordinal->ordinal map
                table = {ord(c): ord('_') for c in illegal}
            else:
                # byte strings need a 256-char translation table instead
                table = string.maketrans(illegal, '_' * len(illegal))
            arcname = arcname.translate(table)
            # remove trailing dots (not allowed in Windows path components)
            arcname = (x.rstrip('.') for x in arcname.split(os.path.sep))
            arcname = os.path.sep.join(x for x in arcname if x)

        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        # A member name ending in '/' is a directory entry: ensure the
        # directory exists and return -- there are no bytes to copy.
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        # Stream the member's bytes to disk; the with-statement closes
        # both handles even if copyfileobj raises.
        with self.open(member, pwd=pwd) as source, \
             file(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)

        return targetpath 
Example 73
Project: bit9platform   Author: carbonblack   File: VirusTotal.py    MIT License 4 votes vote down vote up
def uploadFileToVT(self, pa):
        """Submit the file behind pending-analysis record ``pa`` to
        VirusTotal for scanning.

        Returns the VirusTotal scan id on success, or None when the file
        could not be unzipped or submitted.  In either case the Bit9
        pendingAnalysis record is updated with the outcome.
        """
        scanId = None

        if self.download_location:
            # This is if we want to locally download file from Bit9
            # (in the case shared folder is not accessible)
            localFilePath = self.download_location + "\\temp.zip"
            self.bit9.retrieve_analyzed_file(pa['id'], localFilePath)
        else:
            # Easier option, if Bit9 shared folder can be accessed directly
            localFilePath = pa['uploadPath']

        try:
            # the zip file returned by Bit9 should have only one directory entry in it,
            # the file to be analyzed. Extract that file for analysis. This is done since
            # Bit9 retains the original file path information in the zip file, which may
            # include sensitive/personal information that we don't want to disclose to VT.
            z = zipfile.ZipFile(localFilePath)
            try:
                infp = z.open(z.filelist[0])
                try:
                    outfp = tempfile.NamedTemporaryFile()
                    shutil.copyfileobj(infp, outfp)
                finally:
                    # Close the member stream and the archive explicitly;
                    # the original leaked both handles.
                    infp.close()
            finally:
                z.close()
        except Exception as e:
            pa['analysisStatus'] = 4  # (status: Error)
            pa['analysisError'] = 'Received error when attempting to unzip file from Bit9: %s' % str(e)
            # Update Bit9 status for this file
            self.bit9.update('v1/pendingAnalysis', pa)
            log.exception("Could not unzip file from Bit9 for analysis of %s" % pa)
            return scanId

        # copyfileobj left the position at EOF; rewind before upload.
        outfp.seek(0)
        files = {'file': outfp}
        try:
            r = requests.post(self.vt_url + "/file/scan", files=files, params={'apikey': self.vt_token})
            isError = (r.status_code >= 400)
            # we got VT scanId. We will need it to check status of the scan at later time
            if r.status_code == 200:
                scanId = r.json()['scan_id']
        except:
            log.exception("Could not send file %s to VirusTotal" % (pa,))
            isError = True
        finally:
            # NamedTemporaryFile is deleted on close.
            outfp.close()

        if isError:
            # Report to Bit9 that we had error analyzing this file. This means we will not try analysis again.
            pa['analysisStatus'] = 4  # (status: Error)
            pa['analysisError'] = 'VirusTotal returned error when attempting to send file for scanning'
        else:
            # Tell Bit9 that we are waiting for the scan to finish
            pa['analysisStatus'] = 1 # (status: Analyzing)

        # Update Bit9 status for this file
        self.bit9.update('v1/pendingAnalysis', pa)
        return scanId 
Example 74
Project: fetchLandsatSentinelFromGoogleCloud   Author: vascobnunes   File: fels.py    MIT License 4 votes vote down vote up
def get_landsat_image(url, outputdir, overwrite=False, sat="TM"):
    """Download a Landsat image file.

    :param url: base URL of the Landsat scene.
    :param outputdir: local directory the scene folder is created under.
    :param overwrite: re-download files that already exist locally.
    :param sat: sensor name selecting which band files to try
        ("TM", "OLI_TIRS", "ETM"; anything else falls back to the ETM list).

    On URL/socket timeouts the whole download is retried recursively
    after a 10 second pause (no retry limit -- original behaviour).
    """
    img = os.path.basename(url)
    # Candidate per-band suffixes for the chosen sensor; missing bands
    # simply 404 and are skipped.
    if sat == "TM":
        possible_bands = ['B1.TIF', 'B2.TIF', 'B3.TIF', 'B4.TIF', 'B5.TIF',
                          'B6.TIF', 'B7.TIF', 'GCP.txt', 'VER.txt', 'VER.jpg',
                          'ANG.txt', 'BQA.TIF', 'MTL.txt']
    elif sat == "OLI_TIRS":
        possible_bands = ['B1.TIF', 'B2.TIF', 'B3.TIF', 'B4.TIF', 'B5.TIF',
                          'B6.TIF', 'B7.TIF', 'B8.TIF', 'B9.TIF', 'B10.TIF',
                          "B11.TIF", 'BQA.TIF', 'MTL.txt']
    elif sat == "ETM":
        possible_bands = ['B1.TIF', 'B2.TIF', 'B3.TIF', 'B4.TIF', 'B5.TIF',
                          'B6.TIF', 'B6_VCID_1.TIF', 'B6_VCID_2.TIF', 'B7.TIF',
                          'B8.TIF', 'B9.TIF', 'BQA.TIF', 'MTL.txt']
    else:
        possible_bands = ['B1.TIF', 'B2.TIF', 'B3.TIF', 'B4.TIF', 'B5.TIF',
                          'B6.TIF', 'B6_VCID_1.TIF', 'B6_VCID_2.TIF', 'B7.TIF',
                          'B8.TIF', 'B9.TIF', 'BQA.TIF', 'MTL.txt']

    target_path = os.path.join(outputdir, img)

    if not os.path.isdir(target_path):
        os.makedirs(target_path)
    for band in possible_bands:
        complete_url = url + "/" + img + "_" + band
        target_file = os.path.join(target_path, img + "_" + band)
        if os.path.exists(target_file) and not overwrite:
            print(target_file, "exists and --overwrite option was not used. Skipping image download")
            continue
        try:
            content = urlopen(complete_url, timeout=600)
        except HTTPError:
            print("Could not find", band, "band image file.")
            continue
        except URLError:
            print("Timeout, Restart=======>")
            time.sleep(10)
            get_landsat_image(url, outputdir, overwrite, sat)
            return
        # Ensure the HTTP response is closed on every path; the original
        # never closed it, leaking a connection per band file.
        try:
            with open(target_file, 'wb') as f:
                try:
                    shutil.copyfileobj(content, f)
                except socket.timeout:
                    print("Socket Timeout, Restart=======>")
                    time.sleep(10)
                    get_landsat_image(url, outputdir, overwrite, sat)
                    return
                print("Downloaded", target_file)
        finally:
            content.close()
Example 75
Project: fetchLandsatSentinelFromGoogleCloud   Author: vascobnunes   File: fels.py    MIT License 4 votes vote down vote up
def get_sentinel2_image(url, outputdir, overwrite=False, partial=False, noinspire=False):
    """
    Collect the entire dir structure of the image files from the
    manifest.safe file and build the same structure in the output
    location.

    :param url: base URL of the Sentinel-2 SAFE product.
    :param outputdir: local directory the product tree is created under.
    :param overwrite: download again even if the target directory exists.
    :param partial: if true, delete the downloaded tree when the tile
        turns out to be only partially covered.
    :param noinspire: if true, skip renaming the product directory after
        the INSPIRE title.
    """
    img = os.path.basename(url)
    target_path = os.path.join(outputdir, img)
    target_manifest = os.path.join(target_path, "manifest.safe")
    # NOTE(review): with overwrite=True and an existing target_path,
    # os.makedirs raises -- confirm callers never hit that combination.
    if not os.path.exists(target_path) or overwrite:
        os.makedirs(target_path)
        manifest_url = url + "/manifest.safe"
        content = urlopen(manifest_url)
        with open(target_manifest, 'wb') as f:
            shutil.copyfileobj(content, f)
        with open(target_manifest, 'r') as manifest_file:
            manifest_lines = manifest_file.read().split()
        # Any whitespace-separated token containing 'href' is assumed to
        # be an attribute of the form href="/rel/path"><...; the slice
        # strips the attribute name and quotes -- TODO confirm this holds
        # for all manifest variants.
        for line in manifest_lines:
            if 'href' in line:
                rel_path = line[7:line.find("><") - 2]
                abs_path = os.path.join(target_path, *rel_path.split('/')[1:])
                if not os.path.exists(os.path.dirname(abs_path)):
                    os.makedirs(os.path.dirname(abs_path))
                try:
                    download_file(url + rel_path, abs_path)
                except HTTPError as error:
                    print("Error downloading {} [{}]".format(url + rel_path, error))
                    continue
        # Recreate auxiliary directories downstream tools expect even when
        # the manifest listed no files for them (both at product level and
        # inside the granule directory).
        granule = os.path.dirname(os.path.dirname(get_S2_image_bands(target_path, "B01")))
        for extra_dir in ("AUX_DATA", "HTML"):
            if not os.path.exists(os.path.join(target_path, extra_dir)):
                os.makedirs(os.path.join(target_path, extra_dir))
            if not os.path.exists(os.path.join(granule, extra_dir)):
                os.makedirs(os.path.join(granule, extra_dir))
        if not manifest_lines:
            print()
    if partial:
        # Remove tiles that do not cover the full extent.
        tile_chk = check_full_tile(get_S2_image_bands(target_path, "B01"))
        if tile_chk == 'Partial':
            print("Removing partial tile image files...")
            shutil.rmtree(target_path)
    if not noinspire:
        inspire_file = os.path.join(target_path, "INSPIRE.xml")
        if os.path.isfile(inspire_file):
            inspire_path = get_S2_INSPIRE_title(inspire_file)
            # NOTE(review): inspire_path looks like a bare title, so the
            # rename target resolves relative to the CWD rather than
            # outputdir -- verify this is intended.
            if os.path.basename(target_path) != inspire_path:
                os.rename(target_path, inspire_path)
        else:
            print(f"File {inspire_file} could not be found.") 
Example 76
Project: ConvLab   Author: ConvLab   File: allennlp_file_utils.py    MIT License 4 votes vote down vote up
def get_from_cache(url: str, cache_dir: str = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    target_dir = CACHE_DIRECTORY if cache_dir is None else cache_dir
    os.makedirs(target_dir, exist_ok=True)

    # Resolve the resource's ETag so it can be folded into the cache key.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        with session_with_backoff() as session:
            head = session.head(url, allow_redirects=True)
        if head.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, head.status_code))
        etag = head.headers.get("ETag")

    cache_path = os.path.join(target_dir, url_to_filename(url, etag))

    # Cache hit: nothing to do.
    if os.path.exists(cache_path):
        return cache_path

    # Cache miss: download to a temporary file first, then copy into the
    # cache, so an interrupted transfer never leaves a corrupt entry.
    with tempfile.NamedTemporaryFile() as temp_file:
        logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

        # Pick the transport by URL scheme and stream into the temp file.
        fetch = s3_get if url.startswith("s3://") else http_get
        fetch(url, temp_file)

        # Flush pending writes, then rewind: copyfileobj copies from the
        # current file position onward.
        temp_file.flush()
        temp_file.seek(0)

        logger.info("copying %s to cache at %s", temp_file.name, cache_path)
        with open(cache_path, 'wb') as cache_file:
            shutil.copyfileobj(temp_file, cache_file)

        logger.info("creating metadata file for %s", cache_path)
        meta_path = cache_path + '.json'
        with open(meta_path, 'w') as meta_file:
            json.dump({'url': url, 'etag': etag}, meta_file)

        logger.info("removing temp file %s", temp_file.name)

    return cache_path 
Example 77
Project: gphotos-sync   Author: gilesknap   File: GooglePhotosDownload.py    MIT License 4 votes vote down vote up
def do_download_file(self, base_url: str, media_item: DatabaseMedia):
        """ Runs in a process pool and does a download of a single media item.

        The item is streamed into a NamedTemporaryFile in the destination
        folder and only renamed onto its final path once the transfer
        completed, so an interrupted download never leaves a partial file.
        """
        # On case-insensitive filesystems fold paths to lower case so the
        # local tree matches regardless of stored capitalisation.
        if self.case_insensitive_fs:
            relative_folder = str(media_item.relative_folder).lower()
            filename = str(media_item.filename).lower()
        else:
            relative_folder = media_item.relative_folder
            filename = media_item.filename
        local_folder = self._root_folder / relative_folder
        local_full_path = local_folder / filename

        # '=dv' requests the video bytes, '=d' the original image bytes
        # (Google Photos base-URL download parameters).
        if media_item.is_video():
            download_url = '{}=dv'.format(base_url)
            timeout = self.video_timeout
        else:
            download_url = '{}=d'.format(base_url)
            timeout = self.image_timeout
        # delete=False: on success the file is renamed into place rather
        # than removed on close.
        temp_file = tempfile.NamedTemporaryFile(dir=local_folder, delete=False)
        t_path = Path(temp_file.name)

        try:
            response = self._session.get(download_url, stream=True,
                                         timeout=timeout)
            response.raise_for_status()
            shutil.copyfileobj(response.raw, temp_file)
            temp_file.close()
            # Success sentinel: once cleared, the finally block must not
            # close or delete the (renamed) file.
            temp_file = None
            response.close()
            t_path.rename(local_full_path)
            create_date = Utils.safe_timestamp(media_item.create_date)
            # Set (atime, mtime) from the item's modify/create timestamps.
            os.utime(str(local_full_path),
                     (Utils.safe_timestamp(media_item.modify_date).timestamp(),
                      create_date.timestamp()))
            if _use_win_32:
                # On Windows additionally set the file creation time,
                # which os.utime cannot change.
                file_handle = win32file.CreateFile(
                    str(local_full_path),
                    win32file.GENERIC_WRITE, 0,
                    None, win32con.OPEN_EXISTING,
                    0, None)
                win32file.SetFileTime(
                    file_handle, *(create_date,) * 3)
                file_handle.close()
            os.chmod(str(local_full_path), 0o666 & ~self.current_umask)
        except KeyboardInterrupt:
            log.debug("User cancelled download thread")
            raise
        finally:
            # On any failure the temp file still exists here; remove it so
            # partial downloads do not accumulate.
            if temp_file:
                temp_file.close()
            if t_path.exists():
                t_path.unlink() 
Example 78
Project: OpenBottle   Author: xiaozhuchacha   File: downloader.py    MIT License 4 votes vote down vote up
def _unzip_iter(filename, root, verbose=True):
    if verbose:
        sys.stdout.write('Unzipping %s' % os.path.split(filename)[1])
        sys.stdout.flush()

    try: zf = zipfile.ZipFile(filename)
    except zipfile.error as e:
        yield ErrorMessage(filename, 'Error with downloaded zip file')
        return
    except Exception as e:
        yield ErrorMessage(filename, e)
        return

    # Get lists of directories & files
    namelist = zf.namelist()
    dirlist = set()
    for x in namelist:
        if x.endswith('/'):
            dirlist.add(x)
        else:
            dirlist.add(x.rsplit('/',1)[0] + '/')
    filelist = [x for x in namelist if not x.endswith('/')]

    # Create the target directory if it doesn't exist
    if not os.path.exists(root):
        os.mkdir(root)

    # Create the directory structure
    for dirname in sorted(dirlist):
        pieces = dirname[:-1].split('/')
        for i in range(len(pieces)):
            dirpath = os.path.join(root, *pieces[:i+1])
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)

    # Extract files.
    for i, filename in enumerate(filelist):
        filepath = os.path.join(root, *filename.split('/'))

        try:
            with open(filepath, 'wb') as dstfile, zf.open(filename) as srcfile:
                shutil.copyfileobj(srcfile, dstfile)
        except Exception as e:
            yield ErrorMessage(filename, e)
            return

        if verbose and (i*10/len(filelist) > (i-1)*10/len(filelist)):
            sys.stdout.write('.')
            sys.stdout.flush()
    if verbose:
        print()

######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime. 
Example 79
Project: OpenBottle   Author: xiaozhuchacha   File: downloader.py    MIT License 4 votes vote down vote up
def _unzip_iter(filename, root, verbose=True):
    if verbose:
        sys.stdout.write('Unzipping %s' % os.path.split(filename)[1])
        sys.stdout.flush()

    try: zf = zipfile.ZipFile(filename)
    except zipfile.error as e:
        yield ErrorMessage(filename, 'Error with downloaded zip file')
        return
    except Exception as e:
        yield ErrorMessage(filename, e)
        return

    # Get lists of directories & files
    namelist = zf.namelist()
    dirlist = set()
    for x in namelist:
        if x.endswith('/'):
            dirlist.add(x)
        else:
            dirlist.add(x.rsplit('/',1)[0] + '/')
    filelist = [x for x in namelist if not x.endswith('/')]

    # Create the target directory if it doesn't exist
    if not os.path.exists(root):
        os.mkdir(root)

    # Create the directory structure
    for dirname in sorted(dirlist):
        pieces = dirname[:-1].split('/')
        for i in range(len(pieces)):
            dirpath = os.path.join(root, *pieces[:i+1])
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)

    # Extract files.
    for i, filename in enumerate(filelist):
        filepath = os.path.join(root, *filename.split('/'))

        try:
            with open(filepath, 'wb') as dstfile, zf.open(filename) as srcfile:
                shutil.copyfileobj(srcfile, dstfile)
        except Exception as e:
            yield ErrorMessage(filename, e)
            return

        if verbose and (i*10/len(filelist) > (i-1)*10/len(filelist)):
            sys.stdout.write('.')
            sys.stdout.flush()
    if verbose:
        print()

######################################################################
# Index Builder
######################################################################
# This may move to a different file sometime. 
Example 80
Project: LaserTOF   Author: kyleuckert   File: backend_pgf.py    MIT License 4 votes vote down vote up
def _print_pdf_to_fh(self, fh, *args, **kwargs):
        """Render the figure to PGF, compile it with LaTeX in a temporary
        directory, and copy the resulting PDF bytes into the open binary
        file object ``fh``.

        Raises RuntimeError when the LaTeX compilation fails, with the
        full compiler log attached.
        """
        w, h = self.figure.get_figwidth(), self.figure.get_figheight()

        # Create the temp dir *before* entering the try block: if mkdtemp
        # itself raised inside try, the finally clause would hit an
        # UnboundLocalError on `tmpdir`.
        tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
        try:
            fname_pgf = os.path.join(tmpdir, "figure.pgf")
            fname_tex = os.path.join(tmpdir, "figure.tex")
            fname_pdf = os.path.join(tmpdir, "figure.pdf")

            # print figure to pgf and compile it with latex
            self.print_pgf(fname_pgf, *args, **kwargs)

            latex_preamble = get_preamble()
            latex_fontspec = get_fontspec()
            # Minimal wrapper document sized exactly to the figure.
            latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}

\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
            with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
                fh_tex.write(latexcode)

            texcommand = get_texcommand()
            cmdargs = [str(texcommand), "-interaction=nonstopmode",
                       "-halt-on-error", "figure.tex"]
            try:
                check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
            except subprocess.CalledProcessError as e:
                raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))

            # copy file contents to target
            with open(fname_pdf, "rb") as fh_src:
                shutil.copyfileobj(fh_src, fh)
        finally:
            # Best-effort cleanup; if removal fails (e.g. file still locked
            # on Windows) defer it to TmpDirCleaner.
            try:
                shutil.rmtree(tmpdir)
            except:
                TmpDirCleaner.add(tmpdir)