Python urllib._urlopener() Examples

The following code examples show how to use urllib._urlopener(). They are taken from open-source Python projects. You can vote up the examples you like or vote down the ones you don't.

Example 1
Project: service.subtitles.tusubtitulocom   Author: k3lebra   File: TuSubtituloCom_Parse.py   License: GNU General Public License v2.0   Votes: 6
def getUrl(self,url):
	"""Fetch *url* with a custom User-Agent and Referer header.

	Returns the page body as a string, or None if the request fails.
	"""
	class AppURLopener(urllib.FancyURLopener):
		# Custom User-Agent sent with every request from this opener.
		version = "App/1.7"

		def add_referrer(self, url=None):
			# Attach the header to *this* opener instance; the original
			# reached through the module-level urllib._urlopener global,
			# which breaks if that global is replaced or not yet set.
			if url:
				self.addheader('Referer', url)

	opener = AppURLopener()
	opener.add_referrer("http://www.tusubtitulo.com/")
	# Keep the module-level hook installed for code that relies on it
	# (urllib.urlopen uses urllib._urlopener under Python 2).
	urllib._urlopener = opener
	try:
		response = opener.open(url)
		content = response.read()
	except IOError:
		# Network/HTTP failure (urllib raises IOError on Python 2):
		# signal the problem to the caller by returning None instead of
		# silently swallowing every exception with a bare except.
		content = None
	return content
Example 2
Project: service.subtitles.tusubtitulocom   Author: k3lebra   File: TuSubtituloCom.py   License: GNU General Public License v2.0   Votes: 6
def getUrl(self,url):
	"""Fetch *url* with a custom User-Agent and Referer header.

	Returns the page body as a string, or None if the request fails.
	"""
	class AppURLopener(urllib.FancyURLopener):
		# Custom User-Agent sent with every request from this opener.
		version = "App/1.7"

		def add_referrer(self, url=None):
			# Attach the header to *this* opener instance; the original
			# reached through the module-level urllib._urlopener global,
			# which breaks if that global is replaced or not yet set.
			if url:
				self.addheader('Referer', url)

	opener = AppURLopener()
	opener.add_referrer("http://www.tusubtitulo.com/")
	# Keep the module-level hook installed for code that relies on it
	# (urllib.urlopen uses urllib._urlopener under Python 2).
	urllib._urlopener = opener
	try:
		response = opener.open(url)
		content = response.read()
	except IOError:
		# Network/HTTP failure (urllib raises IOError on Python 2):
		# signal the problem to the caller by returning None instead of
		# silently swallowing every exception with a bare except.
		content = None
	return content
Example 3
Project: Yuki-Chan-The-Auto-Pentest   Author: Yukinoshita47   File: gooenum.py   License: MIT License   Votes: 5
def scrape_google(dom):
    """
    Enumerate sub-domains and hosts for *dom* by scraping Google search
    results. Returns a unique list of host names extracted from the HREF
    entries in the result pages.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; '
                      'rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7',
    }
    # Install the custom opener used by the legacy urllib.urlopen path.
    urllib._urlopener = AppURLopener()
    #opener.addheaders = [('User-Agent','Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)')]
    pages = []
    # Walk the result pages 100 entries at a time.
    for offset in ("100", "200", "300", "400", "500"):
        url = ("http://google.com/search?hl=en&lr=&ie=UTF-8&q=%2B" + dom +
               "&start=" + offset + "&sa=N&filter=0&num=100")
        try:
            # Python 2 path: urllib.urlopen goes through _urlopener.
            sock = urllib.urlopen(url)
            pages.append(sock.read())
            sock.close()
        except AttributeError:
            # Python 3 path: urllib.urlopen no longer exists.
            request = urllib.request.Request(url, None, headers)
            pages.append(str(urllib.request.urlopen(request).read()))
    data = "".join(pages)
    hosts = unique(re.findall(
        "href=\"htt\w{1,2}:\/\/([^:?]*[a-b0-9]*[^:?]*\." + dom + ")\/", data))
    # Make sure we are only getting the host portion of each match.
    filtered = []
    for host in hosts:
        filtered.extend(re.findall("^([a-z.0-9^]*" + dom + ")", host))
    time.sleep(2)
    return unique(filtered)
Example 4
Project: androlyze   Author: nachtmaar   File: playversion.py   License: MIT License   Votes: 5
def get_apk_version(package_name):
	''' Returns the version of the `package_name` in the play store '''
	# Install the custom opener so urllib.urlopen uses it for this request.
	urllib._urlopener = MyOpener()

	page = urllib.urlopen(
		"https://play.google.com/store/apps/details?id=%s" % package_name)
	markup = page.read()

	# Parse the Play Store HTML and pull out the software-version field.
	lister = SwVersionLister()
	lister.feed(markup)
	return lister.swversion
Example 5
Project: opentracing-python-instrumentation   Author: uber-common   File: urllib.py   License: MIT License   Votes: 4
def install_patches():
    """Monkey-patch the Python 2 ``urllib`` module so that every request made
    through ``urllib.urlopen`` is wrapped in an OpenTracing client span.

    On Python 3 the legacy ``urllib`` opener machinery is gone, so the work is
    delegated to the ``urllib2`` patcher instead.
    """
    if six.PY3:
        # The old urllib does not exist in Py3, so delegate to urllib2 patcher
        from . import urllib2
        urllib2.install_patches()
        return

    # Py2-only imports: urlparse was merged into urllib.parse in Py3.
    import urllib
    import urlparse

    log.info('Instrumenting urllib methods for tracing')

    class TracedURLOpener(urllib.FancyURLopener):
        # Opener that wraps each open() call in a client-side tracing span.

        def open(self, fullurl, data=None):
            # Extract peer host/port for span tags; either may be absent.
            parsed_url = urlparse.urlparse(fullurl)
            host = parsed_url.hostname or None
            port = parsed_url.port or None

            span = utils.start_child_span(
                operation_name='urllib', parent=current_span_func())

            span.set_tag(ext_tags.SPAN_KIND, ext_tags.SPAN_KIND_RPC_CLIENT)

            # use span as context manager so that its finish() method is called
            with span:
                span.set_tag(ext_tags.HTTP_URL, fullurl)
                if host:
                    span.set_tag(ext_tags.PEER_HOST_IPV4, host)
                if port:
                    span.set_tag(ext_tags.PEER_PORT, port)
                # TODO add callee service name
                # TODO add headers to propagate trace
                # cannot use super here, this is an old style class
                fileobj = urllib.FancyURLopener.open(self, fullurl, data)
                # getcode() may return None (e.g. non-HTTP schemes); only tag
                # the status code when one is actually available.
                if fileobj.getcode() is not None:
                    span.set_tag(ext_tags.HTTP_STATUS_CODE, fileobj.getcode())

            return fileobj

        def retrieve(self, url, filename=None, reporthook=None, data=None):
            # Deliberately unsupported: retrieve() is not instrumented.
            raise NotImplementedError

    # Replace the module-level opener so urllib.urlopen uses the traced one.
    urllib._urlopener = TracedURLOpener()