Python urllib.request Examples

The following code examples show how to use the urllib.request module. They are taken from open source Python projects.
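
Before the project examples, here is a minimal, self-contained sketch of the module's two most common entry points, urllib.request.urlopen() and urllib.request.Request(). The httpbin.org URLs and the User-Agent value are only illustrative placeholders.

import json
import urllib.request

# Plain GET: urlopen() returns an http.client.HTTPResponse object.
with urllib.request.urlopen('https://httpbin.org/get', timeout=10) as resp:
    print(resp.status, len(resp.read()))

# GET with custom headers via a Request object.
req = urllib.request.Request(
    'https://httpbin.org/headers',
    headers={'User-Agent': 'urllib-example/1.0'},  # placeholder header value
)
with urllib.request.urlopen(req, timeout=10) as resp:
    print(json.loads(resp.read().decode('utf-8')))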

Example 1
Project: o2g   Author: hiposfer   File: web.py    MIT License 6 votes
def dl_osm_from_overpass(area, bbox):
    if not area and not bbox:
        raise Exception('At least area or bbox must be given.')

    overpass_query = build_overpass_query(area, bbox)
    overpass_api_url = "http://overpass-api.de/api/interpreter"

    filepath = tempfile.mktemp(suffix='_overpass.osm')
    resp = urlopen(overpass_api_url, overpass_query.encode('utf-8'))

    if resp.status == 200:
        with open(filepath, 'w') as osm:
            osm.write(resp.read().decode('utf-8'))
        return os.path.split(filepath)[-1], filepath
    else:
        # HTTPError expects (url, code, msg, hdrs, fp) arguments
        raise urllib.request.HTTPError(
            overpass_api_url, resp.status,
            'Error calling Overpass API. Status: {}, reason: {}'.format(
                resp.status, resp.reason),
            resp.headers, None)

    raise Exception("Can't download data from overpass api.")
Example 2
Project: LabShare   Author: Bartzi   File: utils.py    GNU General Public License v2.0 6 votes
def login_required_ajax(function=None, redirect_field_name=None):
    """
    Just make sure the user is authenticated to access a certain ajax view

    Otherwise return an HttpResponse 401 - authentication required
    instead of the 302 redirect of the original Django decorator
    """
    def _decorator(view_func):
        def _wrapped_view(request, *args, **kwargs):
            if request.user.is_authenticated:
                return view_func(request, *args, **kwargs)
            else:
                return HttpResponse(status=401)
        return _wrapped_view

    if function is None:
        return _decorator
    else:
        return _decorator(function) 
Example 3
Project: recipe-box   Author: rtlee9   File: get_pictures.py    MIT License 6 votes
def save_picture(recipes_raw, url):
    recipe = recipes_raw[url]
    path_save = path.join(
        config.path_img, '{}.jpg'.format(URL_to_filename(url)))
    if not path.isfile(path_save):
        if 'picture_link' in recipe:
            link = recipe['picture_link']
            if link is not None:
                try:
                    if 'epicurious' in url:
                        img_url = 'https://{}'.format(link[2:])
                        urllib.request.urlretrieve(img_url, path_save)
                    else:
                        urllib.request.urlretrieve(link, path_save)
                except:
                    print('Could not download image from {}'.format(link)) 
Example 4
Project: smoke-zephyr   Author: zeroSteiner   File: utilities.py    BSD 3-Clause "New" or "Revised" License 6 votes
def download(url, filename=None):
	"""
	Download a file from a url and save it to disk.

	:param str url: The URL to fetch the file from.
	:param str filename: The destination file to write the data to.
	"""
	# requirements os, shutil, urllib.parse, urllib.request
	if not filename:
		url_parts = urllib.parse.urlparse(url)
		filename = os.path.basename(url_parts.path)
	url_h = urllib.request.urlopen(url)
	with open(filename, 'wb') as file_h:
		shutil.copyfileobj(url_h, file_h)
	url_h.close()
	return 
Example 5
Project: clacker   Author: wez   File: kicadpcb.py    GNU General Public License v2.0 6 votes
def resolveFootprintPath(self, footprint):
        libname, compname = footprint.split(':')
        lib = library_map[libname]
        if lib.startswith('https://'):
            url = urlparse(lib)
            if url.netloc == 'github.com':
                comps = url.path.split('/')[1:]
                repo = comps[0] + '/' + comps[1]
                if len(comps) > 2:
                    extra = '/' + '/'.join(comps[2:])
                else:
                    extra = ''
                lib = 'https://raw.githubusercontent.com/' + repo + '/master' + extra
                url = urlparse(lib)
            dirname = os.path.join(self._cache_dir, url.path[1:])
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            localname = os.path.join(dirname, compname + '.kicad_mod')
            lib += '/' + os.path.basename(localname)
            if not os.path.isfile(localname):
                print('Fetching %s as %s' % (lib, localname))
                with open(localname, 'wb') as f:
                    f.write(urllib.request.urlopen(lib).read())
            lib = dirname
        return lib, compname 
Example 6
Project: urllib-requests-adapter   Author: Matrixcoffee   File: __init__.py    Apache License 2.0 6 votes
def execute(self):
		timeout = self.timeout_seconds
		if timeout is None: timeout = GLOBAL_TIMEOUT_SECONDS
		try:
			if timeout is None:
				r = urllib.request.urlopen(self.rq, **_get_tls_parms())
			else:
				r = urllib.request.urlopen(self.rq, None, timeout, **_get_tls_parms())
		except urllib.error.HTTPError as e:
			self.status_code = e.code
			self.text = e.msg
			return self
		except (http.client.HTTPException, urllib.error.URLError, socket.timeout) as e:
			e2 = exceptions.RequestException("{}.{!r}".format(type(e).__module__, e))
			raise e2
		self._rdata = r.read()
		if DEBUG: print("Response._rdata set to:", repr(self._rdata), file=sys.stderr)
		self.map_request(r)
		r.close()
		return self 
Example 7
Project: igd-exporter   Author: yrro   File: igd.py    MIT License 6 votes
def probe_metric(service_url, metric):
    '''
    Query the service at the given URL for the given metric value.

    Assumptions are made about the name of the method and output parameters
    which are only valid for the WanCommonInterfaceConfig service.
    '''
    envelope = E(QName(ns['s'], 'Envelope'), {QName(ns['s'], 'encodingStyle'): 'http://schemas.xmlsoap.org/soap/encoding/'})
    body = sE(envelope, QName(ns['s'], 'Body'))
    method = sE(body, QName(ns['i'], 'Get{}'.format(metric)))
    request_tree = ET(envelope)
    with io.BytesIO() as out:
        out.write(b'<?xml version="1.0"?>')
        request_tree.write(out, encoding='utf-8')
        out.write(b'\r\n') # or else my Belkin F5D8236-4 never responds...
        req = urllib.request.Request(service_url, out.getvalue())

    req.add_header('Content-Type', 'text/xml')
    req.add_header('SOAPAction', '"{}#{}"'.format(ns['i'], 'Get{}'.format(metric)))

    with urllib.request.urlopen(req) as result:
        result_tree = ElementTree.parse(result)
        return int(result_tree.findtext('.//New{}'.format(metric), namespaces=ns)) 
Example 8
Project: NiujiaoDebugger   Author: MrSrc   File: test_http_cookiejar.py    GNU General Public License v3.0 6 votes
def test_netscape_misc(self):
        # Some additional Netscape cookies tests.
        c = CookieJar()
        headers = []
        req = urllib.request.Request("http://foo.bar.acme.com/foo")

        # Netscape allows a host part that contains dots
        headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
        res = FakeResponse(headers, "http://www.acme.com/foo")
        c.extract_cookies(res, req)

        # and that the domain is the same as the host without adding a leading
        # dot to the domain.  Should not quote even if strange chars are used
        # in the cookie value.
        headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
        res = FakeResponse(headers, "http://www.acme.com/foo")
        c.extract_cookies(res, req)

        req = urllib.request.Request("http://foo.bar.acme.com/foo")
        c.add_cookie_header(req)
        self.assertIn("PART_NUMBER=3,4", req.get_header("Cookie"))
        self.assertIn("Customer=WILE_E_COYOTE",req.get_header("Cookie")) 
Example 9
Project: repo.natko1412   Author: natko1412   File: functions.py    GNU General Public License v2.0 6 votes
def get_shows_mreza(tagy):
    url='http://mreza.tv/video/'
    shows=[]
    request=urllib.request.urlopen(url)
    html=request.read()
    soup=bs(html)

    tag=soup.find("div", {"id":"wrapper-glavni"})
    tag=tag.find("section",{"class":"%s"%tagy})
   
    pom=tag.findAll("h5")
    for i in range(len(pom)):
        pomy=pom[i].findAll("a")[0]
        ime=pomy.get("title")
        link=pomy.get("href")
        shows+=[[link,ime]]
    return shows 
Example 10
Project: grove   Author: jaredthecoder   File: utils.py    MIT License 5 votes
def parse_auth_header(auth_header):
    """Parse the authentication header sent on authenticated requests"""

    if auth_header is None:
        return None
    try:
        auth_type, param_strs = auth_header.split(" ", 1)
        items = urllib.request.parse_http_list(param_strs)
        opts = urllib.request.parse_keqv_list(items)
    except Exception as e:
        import traceback
        traceback.print_exc()
        return None
    return opts 
Example 11
Project: grove   Author: jaredthecoder   File: utils.py    MIT License 5 votes
def require_login(func):
    """Decorator function that checks if the current user is logged in"""

    def new_func(*args, **kwargs):
        auth_opts = parse_auth_header(request.headers.get('Authorization'))
        try:
            token = auth_opts['token']
        except (KeyError, TypeError) as e:
            abort(401)
            return
        users = User.query.filter_by(auth_token=token)
        if users.count() > 1:
            current_app.logger.error(
                'More than one user with id: {}'.format(token))
            abort(401)
        user = users.first()
        if user is None:
            current_app.logger.error(
                "User for the given authorization token does not exist.")
            abort(401)
            return

        return func(user=user, *args, **kwargs)
    return new_func


# External OAuth Configs 
Example 12
Project: grove   Author: jaredthecoder   File: utils.py    MIT License 5 votes
def abort_not_exist(_id, _type):
    """Abort the request if the entity does not exist."""

    abort(404,
          message="{} {} does not exist. Please try again with a different {}".format(_type, _id, _type)) 
Example 13
Project: grove   Author: jaredthecoder   File: utils.py    MIT License 5 votes
def abort_cannot_update(_id, _type):
    """Abort the request if the entity cannot be updated."""

    abort(400,
          message="Cannot update {} {}. Please try again.".format(_type, _id)) 
Example 14
Project: grove   Author: jaredthecoder   File: utils.py    MIT License 5 votes
def abort_cannot_create(_type):
    """Abort the request if the entity cannot be created."""

    abort(400,
          message='Cannot create {} because you have not supplied the proper parameters.'.format(_type)) 
Example 15
Project: o2g   Author: hiposfer   File: web.py    MIT License 5 votes
def dl_osm_from_url(url):
    filename = tempfile.mktemp(suffix=pathlib.Path(url).name)
    filepath, headers =\
        urllib.request.urlretrieve(url, filename=filename)
    return pathlib.Path(url).name, filepath 
Example 16
Project: motion-tracking   Author: dansbecker   File: download_images.py    MIT License 5 votes
def run(self):
        print('Images to request: ', self.imgs_to_request)
        with concurrent.futures.ThreadPoolExecutor(max_workers=self.nb_workers) as worker_pool:
            for _, fname, img_src in self.image_urls:
                if ((self.imgs_requested + self.imgs_previously_captured) % 5000 == 0):
                    print('Images requested this run: ', self.imgs_requested)
                    print('Images skipped because already captured: ', self.imgs_previously_captured)
                    print('Failed image requests: ', self.failed_img_requests)
                worker_pool.submit(self.get_one_img, fname, img_src)  # pass the callable and its args so the pool schedules it
Example 17
Project: motion-tracking   Author: dansbecker   File: download_images.py    MIT License 5 votes
def get_one_img(self, fname, url):
        local_img_path = join(self.target_path, fname)
        if exists(local_img_path):
            self.imgs_previously_captured += 1
            return
        try:
            self.imgs_requested += 1
            url, _ = urllib.request.urlretrieve(url, local_img_path)
        except:
            self.failed_img_requests += 1
            self.failed_to_capture.append((url, local_img_path)) 
Example 18
Project: PheKnowLator   Author: callahantiff   File: EdgeDictionary.py    Apache License 2.0 5 votes
def queries_uniprot_api(data_file):
        """Searches a list of entities against the Uniprot API (uniprot.org/help/api_idmapping).

        Args:
            data_file (str): A filepath containing data to map.

        Returns:
            A dictionary of results returned from mapping identifiers.

        Raises:
            An exception is raised if the generated dictionary does not have the same number of rows as what was
            returned by the API.
        """
        proteins = list(set([x.split('\\t')[1] for x in open(data_file).read().split('!')[-1].split('\\n')[1:]]))
        params = {'from': 'ACC+ID', 'to': 'P_ENTREZGENEID', 'format': 'tab', 'query': ' '.join(proteins)}

        data = urllib.parse.urlencode(params).encode('utf-8')
        requests = urllib.request.Request('https://www.uniprot.org/uploadlists/', data)
        results = urllib.request.urlopen(requests).read().decode('utf-8')

        # convert results to dictionary
        api_results = {}
        for res in results.split('\n')[1:-1]:
            res_row = res.split('\t')
            if str(res_row[0]) in api_results.keys():
                api_results[str(res_row[0])].append('http://purl.uniprot.org/geneid/' + str(res_row[1]))

            else:
                api_results[str(res_row[0])] = ['http://purl.uniprot.org/geneid/' + str(res_row[1])]

        # CHECK - all URLs returned a data file
        if len(api_results) == 0:
            raise Exception('ERROR: API returned no data')
        else:
            return api_results 
Example 19
Project: SublimeKSP   Author: nojanath   File: ksp_plugin.py    GNU General Public License v3.0 5 votes
def read_file_function(self, filepath):
        if filepath.startswith('http://'):
            from urllib.request import urlopen
            s = urlopen(filepath, timeout=5).read().decode('utf-8')
            return re.sub('\r+\n*', '\n', s)

        if self.base_path:
            filepath = os.path.join(self.base_path, filepath)
        filepath = os.path.abspath(filepath)
        view = CompileKspThread.find_view_by_filename(filepath, self.base_path)
        if view is None:
            s = codecs.open(filepath, 'r', 'utf-8').read()
            return re.sub('\r+\n*', '\n', s)
        else:
            return view.substr(sublime.Region(0, view.size())) 
Example 20
Project: LabShare   Author: Bartzi   File: utils.py    GNU General Public License v2.0 5 votes
def send_reservation_mail_for(request, gpu):
    if gpu.reservations.count() > 1:
        current_reservation = gpu.reservations.order_by("time_reserved").first()
        email_addresses = [address.email for address in current_reservation.user.email_addresses.all()]
        email_addresses.append(current_reservation.user.email)
        send_mail(
            "New reservation on GPU",
            render(request, "mails/new_reservation.txt", {
                    "gpu": gpu,
                    "reservation": current_reservation
                }).content.decode('utf-8'),
            settings.DEFAULT_FROM_EMAIL,
            email_addresses,
        ) 
Example 21
Project: minipos   Author: simon-v   File: bch.py    Apache License 2.0 5 votes
def jsonload(url):
	'''Load a web page and return the resulting JSON object'''
	request = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
	with urllib.request.urlopen(request, timeout=TIMEOUT) as webpage:
		data = str(webpage.read(), 'UTF-8')
		data = json.loads(data)
	return data 
Example 22
Project: web-scraper   Author: keeper-of-data   File: howstuffworks.py    MIT License 5 votes
def get_save_path(self, url, crumbs=None):
        """
        Pass in the url of the article and crumbs list if you have them
        :param url: url of the article
        :param crumbs: list of bread crumbs; if not passed, a request will be sent to parse them
        :return: relative path & save path as a string
        """
        if crumbs is None:
            crumbs = self.get_crumbs(url)

        rel_path = 'articles'
        # Add crumbs to path
        for crumb in crumbs:
            crumb = self.sanitize(crumb.replace(' ', '_'))
            rel_path = os.path.join(rel_path, crumb)

        # Add article title to path
        title = url.split('/')[-1].split('.')[0]
        rel_path = os.path.join(rel_path, title)
        rel_path += '/'
        rel_path = os.path.normcase(rel_path)
        rel_path = rel_path.replace("\\", "/")  # Replace windows '\\' with web safe '/'
        # Create full path
        full_save_path = os.path.normpath(os.path.join(self._base_dir, rel_path)) + '/'
        full_save_path = os.path.normcase(full_save_path)
        return (rel_path, full_save_path) 
Example 23
Project: web-scraper   Author: keeper-of-data   File: howstuffworks.py    MIT License 5 votes
def get_crumbs(self, url=None, soup=None):
        """
        Use soup if you have it; that way fewer requests are made to the site.
        :param url: url of article
        :param soup: soup of the page
        :return: list of crumbs
        """
        # Only request site if needed
        if soup is not None:
            page_soup = soup
        elif url is not None:
            try:
                page_soup = self.get_site(url, self._url_header)
            except RequestsError as e:
                raise CrumbsError("Cannot get_site")
        else:
            return []

        crumbs = []
        try:
            for crumb in page_soup.find("div", {"class": "breadcrumb"}).find_all("a"):
                crumbs.append(crumb.get_text().strip())
        except AttributeError:
            raise CrumbsError("No bread crumbs found")

        return crumbs 
Example 24
Project: sanic   Author: huge-success   File: test_worker.py    MIT License 5 votes
def test_gunicorn_worker(gunicorn_worker):
    with urllib.request.urlopen("http://localhost:1337/") as f:
        res = json.loads(f.read(100).decode())
    assert res["test"] 
Example 25
Project: sanic   Author: huge-success   File: test_worker.py    MIT License 5 votes
def test_gunicorn_worker_no_logs(gunicorn_worker_with_env_var):
    """
    if SANIC_ACCESS_LOG was set to False do not show access logs
    """
    with urllib.request.urlopen("http://localhost:1339/") as _:
        gunicorn_worker_with_env_var.kill()
        assert not gunicorn_worker_with_env_var.stdout.read() 
Example 26
Project: sanic   Author: huge-success   File: test_worker.py    MIT License 5 votes
def test_gunicorn_worker_with_logs(gunicorn_worker_with_access_logs):
    """
    default - show access logs
    """
    with urllib.request.urlopen("http://localhost:1338/") as _:
        gunicorn_worker_with_access_logs.kill()
        assert (
            b"(sanic.access)[INFO][127.0.0.1"
            in gunicorn_worker_with_access_logs.stdout.read()
        ) 
Example 27
Project: sanic   Author: huge-success   File: test_worker.py    MIT License 5 votes
def test_run_max_requests_exceeded(worker):
    loop = asyncio.new_event_loop()
    worker.ppid = 1
    worker.alive = True
    sock = mock.Mock()
    sock.cfg_addr = ("localhost", 8080)
    worker.sockets = [sock]
    worker.wsgi = mock.Mock()
    worker.connections = set()
    worker.log = mock.Mock()
    worker.loop = loop
    worker.servers = {
        "server1": {"requests_count": 14},
        "server2": {"requests_count": 15},
    }
    worker.max_requests = 10
    worker._run = mock.Mock(wraps=asyncio.coroutine(lambda *a, **kw: None))

    # exceeding request count
    _runner = asyncio.ensure_future(worker._check_alive(), loop=loop)
    loop.run_until_complete(_runner)

    assert not worker.alive
    worker.notify.assert_called_with()
    worker.log.info.assert_called_with(
        "Max requests exceeded, shutting " "down: %s", worker
    ) 
Example 28
Project: PythonHomework   Author: amjltc295   File: sample_task1.py    MIT License 5 votes
def task_8(
    img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
    '''
    Task 8: Module

    Args:
        img_url: address of an image

    Returns:
        result_img: a PIL Image

    Hints:
        * Make sure you have installed the PIL package
        * Take a look at utils.py first
        * You could easily find answers with Google
    '''
    from urllib import request
    result_img = None

    # TODO: download the image from img_url with the request module
    # and add your student ID on it with draw_text() in the utils module
    # under src/.

    # You are allowed to change the img_url to your own image URL.

    # Display the image:
    # result_img.show()
    # Note: please comment this line when hand in.

    # If you are running on a server, use
    # result.save('test.jpg')
    # and copy the file to local or use Jupyter Notebook to render.

    # End of TODO

    return result_img 
Example 29
Project: PythonHomework   Author: amjltc295   File: b07902113.py    MIT License 5 votes
def task_8(
    img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
    '''
    Task 8: Module

    Args:
        img_url: address of an image

    Returns:
        result_img: a PIL Image

    Hints:
        * Make sure you have installed the PIL package
        * Take a look at utils.py first
        * You could easily find answers with Google
    '''
    import urllib.request
    result_img = None
    from utils import draw_text
    # TODO: download the image from img_url with the request module
    # and add your student ID on it with draw_name() in the utils module
    # under src/.
    from PIL import Image
    urllib.request.urlretrieve(img_url, '/mnt/c/users/attis/desktop/pythonhomework/src/b07902113.jpg')
    with Image.open('/mnt/c/users/attis/desktop/pythonhomework/src/b07902113.jpg') as result_img:
        result_img = draw_text(result_img, 'b07902113')
        result_img.show()
    # result_img.show(req)
    # You are allowed to change the img_url to your own image URL.
    # Display the image. If you are running on a server, change this line to
    # result.save('test.jpg')
    
    # End of TODO

    return result_img 
Example 30
Project: PythonHomework   Author: amjltc295   File: b06902137.py    MIT License 5 votes
def task_8(
    img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
    '''
    Task 8: Module

    Args:
        img_url: address of an image

    Returns:
        result_img: a PIL Image

    Hints:
        * Make sure you have installed the PIL package
        * Take a look at utils.py first
        * You could easily find answers with Google
    '''
    from urllib import request
    result_img = None

    # TODO: download the image from img_url with the request module
    # and add your student ID on it with draw_name() in the utils module
    # under src/.

    # You are allowed to change the img_url to your own image URL.

    # Display the image:
    # result_img.show()
    # Note: please comment this line when hand in.

    # If you are running on a server, use
    # result.save('test.jpg')
    # and copy the file to local or use Jupyter Notebook to render.

    # End of TODO

    return result_img 
Example 31
Project: PythonHomework   Author: amjltc295   File: B05902135.py    MIT License 5 votes
def task_8(
    img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
    '''
    Task 8: Module

    Args:
        img_url: address of an image

    Returns:
        result_img: a PIL Image

    Hints:
        * Make sure you have installed the PIL package
        * Take a look at utils.py first
        * You could easily find answers with Google
    '''
    from urllib import request
    result_img = None

    # TODO: download the image from img_url with the request module
    # and add your student ID on it with draw_text() in the utils module
    # under src/.

    # You are allowed to change the img_url to your own image URL.

    # Display the image:
    # result_img.show()
    # Note: please comment this line when hand in.

    # If you are running on a server, use
    # result.save('test.jpg')
    # and copy the file to local or use Jupyter Notebook to render.

    # End of TODO

    return result_img 
Example 32
Project: steam_workshop_downloader   Author: Geam   File: workshop.py    MIT License 5 votes
def get_plugins_info (plugins_id_list):
    """Ask api the info on each plugin(s)
    Will return:
        - error:
            - None if no error encounter
            - the error an error occur
        - an array of plugin with the all the data return by the steam api
    Return error(error), array(array_of_plugins)
    """
    json_response = []
    error = None
    data = const_data['file']
    data['itemcount'] = len(plugins_id_list)
    for idx, plugin_id in enumerate(plugins_id_list):
        data['publishedfileids[' + str(idx) + ']'] = plugin_id
    encode_data = urllib.parse.urlencode(data).encode('ascii')
    try:
        response = urllib.request.urlopen(const_urls['file'], encode_data)
    except HTTPError as e:
        print("Server return " + str(e.code) + " error")
        error = e
    except URLError as e:
        print("Can't reach server: " + e.reason)
        error = e
    else:
        json_response = json.loads(response.read().decode('utf8'))
        json_response = json_response['response']['publishedfiledetails']
    return error, json_response 
Example 33
Project: ArkPlanner   Author: ycremar   File: MaterialPlanning.py    MIT License 5 votes
def request_data(url_stats, url_rules, save_path_stats, save_path_rules):
    """
    To request probability data and conversion rules from web resources and store them locally.
    Args:
        url_stats: string. url to the dropping rate stats data.
        url_rules: string. url to the composing rules data.
        save_path_stats: string. local path for storing the stats data.
        save_path_rules: string. local path for storing the composing rules data.
    Returns:
        material_probs: dictionary. Content of the stats json file.
        convertion_rules: dictionary. Content of the rules json file.
    """
    try:
        os.mkdir(os.path.dirname(save_path_stats))
    except:
        pass
    try:
        os.mkdir(os.path.dirname(save_path_rules))
    except:
        pass
    
    with urllib.request.urlopen(url_stats) as url:
        material_probs = json.loads(url.read().decode())
        with open(save_path_stats, 'w') as outfile:
            json.dump(material_probs, outfile)

    with urllib.request.urlopen(url_rules) as url:
        convertion_rules = json.loads(url.read().decode())
        with open(save_path_rules, 'w') as outfile:
            json.dump(convertion_rules, outfile)

    return material_probs, convertion_rules 
Example 34
Project: Repobot   Author: Desgard   File: MainClass.py    MIT License 5 votes
def rate_limiting(self):
        """
        First value is requests remaining, second value is request limit.
        :type: (int, int)
        """
        remaining, limit = self.__requester.rate_limiting
        if limit < 0:
            self.get_rate_limit()
        return self.__requester.rate_limiting 
Example 35
Project: Repobot   Author: Desgard   File: __init__.py    MIT License 5 votes
def from_import(module_name, *symbol_names, **kwargs):
    """
    Example use:
        >>> HTTPConnection = from_import('http.client', 'HTTPConnection')
        >>> HTTPServer = from_import('http.server', 'HTTPServer')
        >>> urlopen, urlparse = from_import('urllib.request', 'urlopen', 'urlparse')

    Equivalent to this on Py3:

        >>> from module_name import symbol_names[0], symbol_names[1], ...

    and this on Py2:

        >>> from future.moves.module_name import symbol_names[0], ...

    or:

        >>> from future.backports.module_name import symbol_names[0], ...

    except that it also handles dotted module names such as ``http.client``.
    """

    if PY3:
        return __import__(module_name)
    else:
        if 'backport' in kwargs and bool(kwargs['backport']):
            prefix = 'future.backports'
        else:
            prefix = 'future.moves'
        parts = prefix.split('.') + module_name.split('.')
        module = importlib.import_module(prefix + '.' + module_name)
        output = [getattr(module, name) for name in symbol_names]
        if len(output) == 1:
            return output[0]
        else:
            return output 
Example 36
Project: exposure   Author: yuanming-hu   File: fetch_fivek.py    MIT License 5 votes
def download(url, fn=None, path=None):
  if path is not None:
    os.makedirs(path, exist_ok=True)
  else:
    path = '.'

  if fn is None:
    fn = url.split('/')[-1]
  dest_fn = os.path.join(path, fn)
  u = urllib.request.urlopen(url)
  meta = u.info()
  file_size = int(meta.get_all("Content-Length")[0])

  print('Downloading: [{}] ({:.2f} MB)'.format(fn, file_size / 1024 ** 2))
  print('  URL        : {}'.format(url))
  print('  Destination: {}'.format(dest_fn))
  with open(dest_fn, 'wb') as f:
    
    downloaded = 0
    block_size = 65536
    
    while True:
      buffer = u.read(block_size)
      if not buffer:
        break
      f.write(buffer)
      downloaded += len(buffer)
      progress = '  {:.2f}MB  [{:3.2f}%]'.format(downloaded / 1024 ** 2, downloaded * 100 / file_size)
      print(progress, end='\r')
  print() 
Example 37
Project: FritzBoxPresenceDetection   Author: gasperphoenix   File: FritzBox.py    GNU General Public License v3.0 5 votes
def loadFritzBoxPage(self, url, param):
        """Method to read out a page from the FritzBox.
        
        The method reads out the given page from the FritzBox. It automatically includes a session id
        between url and param.
        
        Args:
            url (str):   URL of the page that shall be read out from the FritzBox.
            param (str): Additional parameters that shall be added to the URL.

        Returns:
            Requested page as string, None otherwise.
        """
        pageUrl = 'http://' + self.__server + ':' + self.__port + url + "?sid=" + self.sid.decode('utf-8') + param
        
        logger.debug("Load the FritzBox page: " + pageUrl)
        
        headers = { "Accept" : "application/xml",
                    "Content-Type" : "text/plain",
                    "User-Agent" : USER_AGENT}
    
        request = urllib.request.Request(pageUrl, headers = headers)
        
        try:
            response = urllib.request.urlopen(request)
        except:
            logger.error("Loading of the FritzBox page failed: %s" %(pageUrl))
            
            return None
        
        page = response.read()
        
        if (response.status != 200):
            logger.error("Unexpected feedback from FritzBox received: %s %s" % (response.status, response.reason))
                        
            return None
        else:  
            return page 
Example 38
Project: urllib-requests-adapter   Author: Matrixcoffee   File: __init__.py    Apache License 2.0 5 votes
def __init__(self, url, method="GET", data=None, headers={}, origin_req_host=None, unverifiable=False):
		urllib.request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
		self.method = method.upper() 
Example 39
Project: urllib-requests-adapter   Author: Matrixcoffee   File: __init__.py    Apache License 2.0 5 votes
def request(method, endpoint, params, data=None, headers=None, verify=None, timeout=None):
	return Response(method, endpoint, params, data=data, headers=headers, timeout=timeout).execute() 
Example 40
Project: igd-exporter   Author: yrro   File: igd.py    MIT License 5 votes
def search_socket(sock, timeout, target='ssdp:all'):
    '''
    Transmit an SSDP search request to the local network.

    Filters results as specified by target.

    Returns a list of root device URLs.
    '''
    # XXX investigate use of other IPv6 multicast addresses.
    #   ff02::c - link scope
    #   ff05::c - site scope - do we have to use IPv6 MLD protocol to receive
    #             messages from different network segments?
    addr = 'ff02::c' if sock.family == socket.AF_INET6 else '239.255.255.250'

    h = wsgiref.headers.Headers([])
    h['HOST'] = '[{}]'.format(addr) if sock.family == socket.AF_INET6 else addr
    h['MAN'] = '"ssdp:discover"'
    h['MX'] = str(timeout)
    h['ST'] = target

    with io.BytesIO() as out:
        out.write(b'M-SEARCH * HTTP/1.1\r\n')
        out.write(bytes(h))
        sock.sendto(out.getvalue(), (addr, 1900))

    result = []

    for n in range(100):
        sock.settimeout(timeout)
        try:
            buf, addr = sock.recvfrom(1024)
        except socket.timeout:
            break

        try:
            result.append(search_result(buf, addr))
        except Exception:
            traceback.print_exc()

    return result 
Example 41
Project: igd-exporter   Author: yrro   File: igd.py    MIT License 5 votes
def probe_device(target_url):
    '''
    Determine UDN and service control URL for the WanCommonInterfaceConfig
    service described by SCPD XML found at the given root device URL.
    '''
    with urllib.request.urlopen(target_url) as scpd:
        st = ElementTree.parse(scpd)

    url_base = st.findtext('d:URLBase', namespaces=ns)
    if url_base is None:
        url_base = target_url
    device = st.find("d:device[d:deviceType='urn:schemas-upnp-org:device:InternetGatewayDevice:1']/d:deviceList/d:device[d:deviceType='urn:schemas-upnp-org:device:WANDevice:1']", ns)
    url_path = device.findtext("d:serviceList/d:service[d:serviceType='urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1']/d:controlURL", namespaces=ns)

    return Device(device.findtext('d:UDN', namespaces=ns), urllib.parse.urljoin(url_base, url_path)) 
Example 42
Project: gradio-UI   Author: gradio-app   File: networking.py    GNU General Public License v2.0 5 votes
def url_request(url):
    try:
        req = urllib.request.Request(
            url=url, headers={"content-type": "application/json"}
        )
        res = urllib.request.urlopen(req, timeout=10)
        return res
    except Exception as e:
        raise RuntimeError(str(e)) 
Example 43
Project: Dolphin-Updater   Author: nbear3   File: dolphinapp.py    GNU General Public License v3.0 5 votes
def run(self):
        """run thread task"""
        self.status.emit('Getting newest version...')
        try:
            link = get_dolphin_link()
        except:
            self.error.emit('Newest version not detected, please check your internet connection.')
            return

        file_name = os.path.basename(link)
        zip_file = os.path.join(DolphinUpdate.DOWNLOAD_PATH, file_name)
        to_directory, base_name = os.path.split(self.dir)

        try:
            self.status.emit('Downloading...')
            urllib.request.urlretrieve(link, zip_file)
            self.status.emit('Downloaded. Extracting...')

            if not os.path.isfile('res/7za.exe'):
                self.error.emit('Update failed: Please install 7-Zip')
                self.status.emit('Extraction Failed')
                return

            rename_7z(zip_file, 'Dolphin-x64', base_name)
            extract_7z(zip_file, to_directory)

            self.status.emit(file_name)
            self.status.emit('finished')

        except Exception as error:
            self.error.emit('Update Failed. %s' % error)
            self.status.emit('Update Failed.')

        finally:
            with suppress(FileNotFoundError):
                os.remove(zip_file) 
Example 44
Project: Dolphin-Updater   Author: nbear3   File: dolphincmd.py    GNU General Public License v3.0 5 votes
def _download_new(self):
        print('Getting newest version...')
        link = self._retrieve_current()
        current = os.path.basename(link)

        if os.path.basename(link) == self.version:
            print('You already have the most recent version.')
            return
        elif not os.path.isdir(self.path):
            print('Your dolphin folder path is invalid.')
            return

        file_name = os.path.basename(link)
        zip_file = os.path.join(self.DOWNLOAD_PATH, file_name)
        to_directory, base_name = os.path.split(self.path)

        try:
            print('Downloading...')
            urllib.request.urlretrieve(link, zip_file)
            print('Downloaded. Extracting...')

            if not os.path.isfile('res/7za.exe'):
                print('Update failed: Please install 7-Zip')
                return

            rename_7z(zip_file, 'Dolphin-x64', base_name)
            extract_7z(zip_file, to_directory)

            print('Update successful.')
            self.version = current
            self._udc.set_user_version(self.version)

        except Exception as error:
            print('Update Failed. %s' % error)

        finally:
            with suppress(FileNotFoundError):
                os.remove(zip_file) 
Example 45
Project: RouteOptimization   Author: andre-le   File: tsp.py    MIT License 5 votes
def create_distance_matrix(locations, transport_mode, distance_calculation):
# Create the distance matrix.
  dist_matrix = {}

  # complete distance matrix
  # precompute distance between location to have distance callback in O(1)
  if distance_calculation == "OSRM":
    url = "https://bi.ahamove.com/osrm/table/v1/driving/"
    for loc in locations:
      url += str(loc[1]) + "," + str(loc[0]) + ";"
    url = url[:-1] + "?annotations=distance"
    response = urllib.request.urlopen(url).read().decode('UTF-8')
    contents = json.loads(response)["distances"]
                
    if transport_mode == "N1":
      for index in xrange(len(locations)):
        contents[0][index] = 0

    if transport_mode == "1N":
      for index in xrange(len(locations)):
          contents[index][0] = 0
    dist_matrix = contents
  else:
    for from_node in xrange(len(locations)):
      dist_matrix[from_node] = {}
      for to_node in xrange(len(locations)):
        if (from_node == to_node) or (transport_mode == "1N" and to_node == 0) or (transport_mode == "N1" and from_node == 0):
          dist_matrix[from_node][to_node] = 0
        else:
          distance = (vincenty_distance(
            locations[from_node],
            locations[to_node]))
          dist_matrix[from_node][to_node] = distance
  return dist_matrix 
Example 46
Project: internet-archive   Author: emijrp   File: flickr2ia.py    GNU General Public License v3.0 5 votes
def download(url='', filename=''):
    print("Downloading", url)
    error = True
    while error:
        try:
            urllib.request.urlretrieve(url, filename)
            error = False
        except:
            print("Download error. Retrying")
            error = True
            time.sleep(10) 
Example 47
Project: rpi-water   Author: jcroucher   File: WeatherData.py    MIT License 5 votes
def poll(self):

            print("Updating Weather Data")

            weather_poll_freq = int(self.weather_settings['weather_poll_freq'])

            if weather_poll_freq <= 0:
                weather_poll_freq = 30

            weather_poll_freq *= 60  # Convert minutes to seconds

            threading.Timer(weather_poll_freq, self.poll).start()  # Repeat this function in the time specified

            # Make sure we have settings before going further
            for setting_key in self.weather_settings:
                if self.weather_settings[setting_key] == "" or self.weather_settings[setting_key] == 0:
                    print("No weather settings")
                    return

            # Wrapped up in a block as sometimes there is no response
            try:
                request = 'http://api.wunderground.com/api/'
                request += self.weather_settings['weather_api_key']
                request += '/geolookup/conditions/forecast/q/'
                request += self.weather_settings['weather_country'] + '/'
                request += self.weather_settings['weather_city'] + '.json'

                response = urllib.request.urlopen(request).read().decode('utf8')
                obj = json.loads(response)

                self.current_temperature = obj['current_observation']['temp_c']
                self.max_temp = obj['forecast']['simpleforecast']['forecastday'][0]['high']['celsius']
                self.min_temp = obj['forecast']['simpleforecast']['forecastday'][0]['low']['celsius']

                self.pop = obj['forecast']['simpleforecast']['forecastday'][0]['pop']
                self.pre_today = obj['current_observation']['precip_today_metric']

            except Exception as e:
                print("Error getting weather data") 
Example 48
Project: NiujiaoDebugger   Author: MrSrc   File: test_http_cookiejar.py    GNU General Public License v3.0 5 votes
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
    """Perform a single request / response cycle, returning Cookie: header."""
    req = urllib.request.Request(url)
    cookiejar.add_cookie_header(req)
    cookie_hdr = req.get_header("Cookie", "")
    headers = []
    for hdr in set_cookie_hdrs:
        headers.append("%s: %s" % (hdr_name, hdr))
    res = FakeResponse(headers, url)
    cookiejar.extract_cookies(res, req)
    return cookie_hdr