Python urlparse.parse_qs() Examples

The following code examples show how to use urlparse.parse_qs(). They are taken from open source Python projects. Note that urlparse is the Python 2 standard-library module; in Python 3 the same function is available as urllib.parse.parse_qs().
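Before the project examples, here is a minimal standalone sketch of the call itself (the query strings are made up). Every value comes back as a list, and blank values are dropped unless keep_blank_values is passed:

import urlparse

params = urlparse.parse_qs('name=Fred&name=Lisa&page=2&empty=')
# {'name': ['Fred', 'Lisa'], 'page': ['2']}  -- the blank 'empty' field is dropped

params = urlparse.parse_qs('name=Fred&empty=', keep_blank_values=True)
# {'name': ['Fred'], 'empty': ['']}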

Example 1
Project: python-samples   Author: dek-odoo   File: WerkZeug.py    License: Apache License 2.0
def getParamFromUrlRequest(request, filterStr):
    try:
        urldict = urlparse.parse_qs(request.environ['QUERY_STRING'])
        Debugg.info("SID ISSSS " + urldict[filterStr][0])
        # do not write here self.sid
        # doing that will blow up the whole thing
        sid = urldict[filterStr][0]
        Debugg.info(sid)
        if sid:
            print "return", sid
            return sid
    except:
        Debugg.info(
            __file__ + sys._getframe().f_code.co_name)
        return 0 
Example 2
Project: pytracts   Author: rmorlok   File: to_url.py    License: Apache License 2.0
def decode_message(message_type, encoded_message, **kwargs):
    """Decode urlencoded content to message.

    Args:
      message_type: Message instance to merge URL encoded content into.
      encoded_message: URL encoded message.
      prefix: Prefix to append to field names of contained values.

    Returns:
      Decoded instance of message_type.
    """
    message = message_type()
    builder = URLEncodedRequestBuilder(message, **kwargs)
    arguments = urlparse.parse_qs(encoded_message, keep_blank_values=True)
    for argument, values in sorted(arguments.iteritems()):
        added = builder.add_parameter(argument, values)
        # Save off any unknown values, so they're still accessible.
        if not added:
            message.set_unrecognized_field(argument, values, messages.Variant.STRING)
    message.check_initialized()
    return message 
Example 3
Project: pytracts   Author: rmorlok   File: to_url_test.py    License: Apache License 2.0
def testParameterPrefix(self):
        """Test using the 'prefix' parameter to encode_message."""

        class MyMessage(messages.Message):
            number = messages.IntegerField()
            names = messages.StringField(repeated=True)

        message = MyMessage()
        message.number = 10
        message.names = [u'Fred', u'Lisa']

        encoded_message = to_url.encode_message(message, prefix='prefix-')
        self.assertEquals({'prefix-number': ['10'],
                           'prefix-names': ['Fred', 'Lisa']},
                          urlparse.parse_qs(encoded_message))

        self.assertEquals(message, to_url.decode_message(MyMessage,
                                                                 encoded_message,
                                                                 prefix='prefix-')) 
Example 4
Project: pytracts   Author: rmorlok   File: to_url_test.py    License: Apache License 2.0
def test_encode_message_repeated_message_field(self):
        class Animal(messages.Message):
            name = messages.StringField()
            size = messages.IntegerField()

        class Animals(messages.Message):
            animals = messages.MessageField(Animal, repeated=True)
            number = messages.IntegerField()

        dog = Animal(name='dog', size=12)
        cat = Animal(name='cat', size=10)
        tmp = Animals(animals=[dog, cat], number=2)

        encoded_message = to_url.encode_message(tmp)
        self.assertEquals({'number': ['2'],
                           'animals-0.name': ['dog'],
                           'animals-0.size': ['12'],
                           'animals-1.name': ['cat'],
                           'animals-1.size': ['10']},
                          urlparse.parse_qs(encoded_message))

        self.assertEquals(tmp, to_url.decode_message(Animals, encoded_message)) 
Example 5
Project: lambda_utils   Author: CloudHeads   File: api_gateway.py    License: MIT License
def extract_body(event):
    def content_type():
        headers = event.get('headers', {})
        for key in headers:
            if key.lower() == 'content-type':
                return headers[key]
        return ''

    body = event.get('body')

    if 'application/json' in content_type():
        body = json.loads(event.get('body') or '{}')

    if 'application/x-www-form-urlencoded' in content_type():
        body = parse_qs(event.get('body') or '', keep_blank_values=True)

    return body 
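A minimal sketch of how extract_body might behave, assuming an API Gateway style proxy event (the event dict below is illustrative, not taken from the project):

event = {
    'headers': {'Content-Type': 'application/x-www-form-urlencoded'},
    'body': 'user=alice&comment=',
}
extract_body(event)
# {'user': ['alice'], 'comment': ['']}  -- keep_blank_values=True preserves the empty field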
Example 6
Project: django-oauth-toolkit-jwt   Author: Humanitec   File: views.py    License: MIT License
def get(self, request, *args, **kwargs):
        response = super(JWTAuthorizationView, self).get(request, *args,
                                                         **kwargs)
        if request.GET.get('response_type', None) == 'token' \
                and response.status_code == 302:
            url = urlparse(response.url)
            params = parse_qs(url.fragment)
            if params:
                content = {
                    'access_token': params['access_token'][0],
                    'expires_in': int(params['expires_in'][0]),
                    'scope': params['scope'][0]
                }
                jwt = TokenView()._get_access_token_jwt(request, content)
                response = OAuth2ResponseRedirect(
                    '{}&access_token_jwt={}'.format(response.url, jwt),
                    response.allowed_schemes)
        return response 
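For the implicit grant, the interesting detail above is that the parameters live in the URL fragment, so parse_qs is applied to url.fragment rather than url.query. A minimal sketch with a made-up redirect URL:

from urlparse import urlparse, parse_qs  # urllib.parse in Python 3

redirect = 'http://localhost:8002/callback#access_token=abc123&expires_in=36000&scope=read+write'
params = parse_qs(urlparse(redirect).fragment)
# {'access_token': ['abc123'], 'expires_in': ['36000'], 'scope': ['read write']}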
Example 7
Project: gist-alfred   Author: danielecook   File: PaginatedList.py    License: MIT License
def totalCount(self):
        if not self.__totalCount:
            params = {} if self.__nextParams is None else self.__nextParams.copy()
            # set per_page = 1 so the totalCount is just the number of pages
            params.update({"per_page": 1})
            headers, data = self.__requester.requestJsonAndCheck(
                "GET",
                self.__firstUrl,
                parameters=params,
                headers=self.__headers
            )
            if 'link' not in headers:
                if data and "total_count" in data:
                    self.__totalCount = data["total_count"]
                elif data:
                    self.__totalCount = len(data)
                else:
                    self.__totalCount = 0
            else:
                links = self.__parseLinkHeader(headers)
                lastUrl = links.get("last")
                self.__totalCount = int(parse_qs(lastUrl)['page'][0])
        return self.__totalCount 
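Note that the whole 'last' link URL is passed to parse_qs here, not just its query string. That still yields a standalone 'page' key as long as page is not the first query parameter, because parse_qs simply splits on '&' and '='. A minimal sketch with an illustrative URL:

from urlparse import parse_qs

last_url = 'https://api.github.com/user/repos?per_page=1&page=42'
parse_qs(last_url)
# {'https://api.github.com/user/repos?per_page': ['1'], 'page': ['42']}
int(parse_qs(last_url)['page'][0])  # 42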
Example 8
Project: Pancas   Author: Sup3Roque   File: trailer.py    License: GNU General Public License v2.0
def search(self, url):
        try:
            query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]

            url = self.search_link % urllib.quote_plus(query) + self.key_link

            result = client.request(url)

            items = json.loads(result)['items']
            items = [(i['id']['videoId']) for i in items]

            for url in items:
                url = self.resolve(url)
                if not url is None: return url
        except:
            return 
Example 9
Project: Pancas   Author: Sup3Roque   File: hdcast.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        id = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]

        pageUrl = 'http://hdcast.me/embedplayer.php?id=%s&autoplay=true' % id
        swfUrl = 'http://p.jwpcdn.com/6/12/jwplayer.flash.swf'

        result = client.request(pageUrl, referer=pageUrl)

        streamer = result.replace('//file', '')
        streamer = re.compile("file *: *'(.+?)'").findall(streamer)[-1]

        token = re.compile('getJSON[(]"(.+?)".+?json[.]token').findall(result.replace('\n', ''))[-1]
        token = client.request(token, referer=pageUrl)
        token = re.compile('"token" *: *"(.+?)"').findall(token)[-1]

        url = '%s pageUrl=%s swfUrl=%s token=%s live=true timeout=20' % (streamer, pageUrl, swfUrl, token)

        return url
    except:
        return 
Example 10
Project: Pancas   Author: Sup3Roque   File: p2pcast.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]

        page = urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        page = 'http://p2pcast.tv/stream.php?id=%s&live=0&p2p=0&stretching=uniform' % page

        result = client.request(page, referer=referer)


        try:
            swf = re.compile('src\s*=[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[0]
            swf = client.request(swf)
            swf = re.compile('flashplayer\s*:\s*[\'|\"](.+?)[\'|\"]').findall(swf)[0]
        except:
            swf = 'http://cdn.p2pcast.tv/jwplayer.flash.swf'


        url = re.compile('url\s*=\s*[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = base64.b64decode(url)
        url = '%s|User-Agent=%s&Referer=%s' % (url, urllib.quote_plus(client.agent()), urllib.quote_plus(swf))

        return url
    except:
        return 
Example 11
Project: Pancas   Author: Sup3Roque   File: icefilms_mv_tv_null.py    License: GNU General Public License v2.0
def get_movie(self, imdb, title, year):
        try:
            query = re.sub('^THE\s+|^A\s+', '', title.strip().upper())[0]
            if not query.isalpha(): query = '1'
            query = self.moviesearch_link % query

            result = ''
            links = [self.link_1]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, query), headers=self.headers)
                if 'Donate' in str(result): break

            imdb = re.sub('[^0-9]', '', imdb)

            result = result.decode('iso-8859-1').encode('utf-8')
            result = re.compile('id=%s>.+?href=(.+?)>' % imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = '%s?%s' % (urlparse.urlparse(url).path, urlparse.urlparse(url).query)
            url = url.encode('utf-8')
            return url
        except:
            return 
Example 12
Project: Pancas   Author: Sup3Roque   File: icefilms_mv_tv_null.py    License: GNU General Public License v2.0
def get_show(self, imdb, tvdb, tvshowtitle, year):
        try:
            query = re.sub('^THE\s+|^A\s+', '', tvshowtitle.strip().upper())[0]
            if not query.isalpha(): query = '1'
            query = self.tvsearch_link % query

            result = ''
            links = [self.link_1]
            for base_link in links:
                result = client.request(urlparse.urljoin(base_link, query), headers=self.headers)
                if 'Donate' in str(result): break

            imdb = re.sub('[^0-9]', '', imdb)

            result = result.decode('iso-8859-1').encode('utf-8')
            result = re.compile('id=%s>.+?href=(.+?)>' % imdb).findall(result)[0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = '%s?%s' % (urlparse.urlparse(url).path, urlparse.urlparse(url).query)
            url = url.encode('utf-8')
            return url
        except:
            return 
Example 13
Project: Pancas   Author: Sup3Roque   File: pubfilm_mv_tv.py    License: GNU General Public License v2.0
def get_episode(self, url, imdb, tvdb, title, premiered, season, episode):
        return None
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            tvshowtitle = cleantitle.get(data['tvshowtitle'])
            year = re.findall('(\d{4})', premiered)[0]
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            result = cache.get(self.pubfilm_tvcache, 120)

            result = [i for i in result if tvshowtitle == i[1]]
            result = [i[0] for i in result if season == '%01d' % int(i[2])]
            result = [(i, re.findall('(\d{4})', [x for x in i.split('/') if not x == ''][-1])[0]) for i in result]
            result = [i[0] for i in result if i[1] == year][0]

            url = urlparse.urljoin(self.base_link, result)
            url = urlparse.urlparse(url).path
            url += '?episode=%01d' % int(episode)
            url = url.encode('utf-8')
            return url
        except:
            return 
Example 14
Project: Pancas   Author: Sup3Roque   File: genvideos_mv.py    License: GNU General Public License v2.0
def get_sources(self, url, hosthdDict, hostDict, locDict):
        sources = []
        try:
            if url == None: return sources
            post = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
            post = urllib.urlencode({'v':post})
            result = client.request(self.post, post=post)
            result = json.loads(result)
            print("r102", result)
            for i in result:
                print("i",i,result[i])
                mq = 'SD'
                if '1080' in i: mq = '1080p'
                if '72' in i: mq = 'HD'
                sources.append({'source': 'gvideo', 'quality': mq, 'provider': 'Genvideos', 'url': result[i]})
            return sources
        except:
            return sources 
Example 15
Project: TornadoWeb   Author: VxCoder   File: escape.py    License: Apache License 2.0
def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
        """Parses a query string like urlparse.parse_qs, but returns the
        values as byte strings.

        Keys still become type str (interpreted as latin1 in python3!)
        because it's too painful to keep them as byte strings in
        python3 and in practice they're nearly always ascii anyway.
        """
        # This is gross, but python3 doesn't give us another way.
        # Latin1 is the universal donor of character encodings.
        result = _parse_qs(qs, keep_blank_values, strict_parsing,
                           encoding='latin1', errors='strict')
        encoded = {}
        for k, v in result.items():
            encoded[k] = [i.encode('latin1') for i in v]
        return encoded 
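As the docstring says, keys stay str while values come back as byte strings. A minimal usage sketch under Python 3 (the query string is made up; the exact repr is illustrative):

result = parse_qs_bytes('name=caf%C3%A9&tag=a&tag=b')
# {'name': [b'caf\xc3\xa9'], 'tag': [b'a', b'b']}  -- percent-escaped bytes survive the latin1 round trip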
Example 16
Project: odorik   Author: nijel   File: test_odorik.py    License: GNU General Public License v3.0
def sms_response(request, uri, headers):
    """httpretty SMS sending response generator."""
    assert uri.endswith('/sms')
    params = parse_qs(request.body.decode('utf-8'))
    if params['sender'][0] == '5517':
        return (200, headers, 'successfully_sent 132.44')
    return (200, headers, 'error unsupported_recipient') 
Example 17
Project: pyblish-win   Author: pyblish   File: cgi.py    License: GNU Lesser General Public License v3.0
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument."""
    warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
         PendingDeprecationWarning, 2)
    return urlparse.parse_qs(qs, keep_blank_values, strict_parsing) 
Example 18
Project: slidoc   Author: mitotic   File: sdviewer.py    License: BSD 3-Clause "New" or "Revised" License
def do_GET(self):
        parsed_url = urlparse.urlparse(self.path)
        params = urlparse.parse_qs(parsed_url.query)
        if parsed_url.path.startswith('/http:') or parsed_url.path.startswith('/https:'):
            url = parsed_url.path[1:]
            if parsed_url.query:
                url += '?' + parsed_url.query
            parsed_suburl = urlparse.urlparse(url)
            filename = os.path.basename(parsed_suburl.path) or 'file.md'
            req = urllib2.Request(url)
            try:
                response = urllib2.urlopen(req)
                file = StringIO(response.read())
            except Exception, excp:
                self.respond(cgi.escape('ERROR in accessing URL %s: %s' % (url, excp)))
                return
            cmd_args = []
            for arg in ("pace", "gsheet_url"):
                if arg in params and params[arg][0]:
                    cmd_args.append("--"+arg+"="+params[arg][0])
                    if arg == "gsheet_url":
                        cmd_args.append("--gsheet_login=")
            errmsg, outname, html, messages = process_files([file], [filename], cmd_args)
            if errmsg:
                self.respond('ERROR: '+errmsg)
            else:
                self.respond(html) 
Example 19
Project: VisualNN   Author: angelhunt   File: consumers.py    License: GNU General Public License v3.0
def ws_connect(message):
    print('Connection being established...')
    message.reply_channel.send({
        'accept': True
    })
    # extracting id of network from url params
    params = urlparse.parse_qs(message.content['query_string'])
    networkId = params.get('id', ('Not Supplied',))[0]
    message.channel_session['networkId'] = networkId
    # adding socket to a group based on networkId to send updates of network
    Group('model-{0}'.format(networkId)).add(message.reply_channel) 
Example 20
Project: sqliv   Author: the-robot   File: google.py    License: GNU General Public License v3.0
def filter_result(link):
    try:

        # Valid results are absolute URLs not pointing to a Google domain
        # like images.google.com or googleusercontent.com
        o = urlparse(link, 'http')
        if o.netloc and 'google' not in o.netloc:
            return link

        # Decode hidden URLs.
        if link.startswith('/url?'):
            link = parse_qs(o.query)['q'][0]

            # Valid results are absolute URLs not pointing to a Google domain
            # like images.google.com or googleusercontent.com
            o = urlparse(link, 'http')
            if o.netloc and 'google' not in o.netloc:
                return link

    # Otherwise, or on error, return None.
    except Exception:
        pass
    return None
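A minimal sketch of the three paths through filter_result, using made-up links:

filter_result('https://example.com/page')
# 'https://example.com/page'  -- absolute URL, not a Google domain

filter_result('/url?q=http%3A%2F%2Fexample.com%2Fpage&sa=U&ved=0')
# 'http://example.com/page'   -- hidden URL decoded via parse_qs(o.query)['q'][0]

filter_result('https://images.google.com/imgres?imgurl=x')
# None                        -- Google domains are filtered out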
Example 21
Project: WSGI-web   Author: pushyzheng   File: Request.py    License: MIT License
def parse_args(self, parse_data):
        arguments = urlparse.parse_qs(parse_data)
        return {k:v[0] for k,v in arguments.items()} 
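parse_qs always returns lists of values, so this helper keeps only the first value per key. A minimal sketch of that flattening with an illustrative query string:

arguments = urlparse.parse_qs('page=2&tag=python&tag=wsgi')
# {'page': ['2'], 'tag': ['python', 'wsgi']}
{k: v[0] for k, v in arguments.items()}
# {'page': '2', 'tag': 'python'}  -- values after the first are discarded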
Example 22
Project: django-oauth-toolkit-jwt   Author: Humanitec   File: test_views.py    License: MIT License
def test_get_token_implicit(self):
        """
        Request an access token using Implicit Flow
        """
        Application.objects.create(
            client_id='user_app_id',
            client_secret='user_app_secret',
            client_type=Application.CLIENT_CONFIDENTIAL,
            authorization_grant_type=Application.GRANT_IMPLICIT,
            name='user app',
            skip_authorization=True,
            redirect_uris='http://localhost:8002/callback',
        )
        self.client.force_login(self.test_user)

        response = self.client.get(
            reverse("oauth2_provider_jwt:authorize") +
            '?response_type=token&client_id=user_app_id')
        self.assertEqual(302, response.status_code)
        url = urlparse(response.url)
        params = parse_qs(url.fragment)
        self.assertEqual('Bearer', params['token_type'][0])
        self.assertEqual('read write', params['scope'][0])

        self.assertTrue(params['access_token'][0])
        access_token_jwt = params['access_token_jwt'][0]
        self.assertTrue(access_token_jwt)

        payload_content = self.decode_jwt(access_token_jwt)
        self.assertEqual('test_user', payload_content['username'])
        self.assertEqual('read write', payload_content['scope']) 
Example 23
Project: django-oauth-toolkit-jwt   Author: Humanitec   File: test_views.py    License: MIT License
def test_get_token_changed_id_attribute(self):
        """
        Request an access token using Implicit Flow
        """
        Application.objects.create(
            client_id='user_app_id',
            client_secret='user_app_secret',
            client_type=Application.CLIENT_CONFIDENTIAL,
            authorization_grant_type=Application.GRANT_IMPLICIT,
            name='user app',
            skip_authorization=True,
            redirect_uris='http://localhost:8002/callback',
        )
        self.client.force_login(self.test_user)

        response = self.client.get(
            reverse("oauth2_provider_jwt:authorize") +
            '?response_type=token&client_id=user_app_id')
        self.assertEqual(302, response.status_code)
        url = urlparse(response.url)
        params = parse_qs(url.fragment)
        self.assertEqual('Bearer', params['token_type'][0])
        self.assertEqual('read write', params['scope'][0])

        self.assertTrue(params['access_token'][0])
        access_token_jwt = params['access_token_jwt'][0]
        self.assertTrue(access_token_jwt)

        payload_content = self.decode_jwt(access_token_jwt)
        self.assertEqual(str(self.test_user.id), payload_content['id'])
        self.assertEqual('read write', payload_content['scope']) 
Example 24
Project: password_pwncheck   Author: CboeSecurity   File: pwned-password-server.py    License: MIT License
def do_POST(self):
        if self.debug:
            print "Received Request"
        
        parsed_path = urlparse(unicode(self.path))
        length = int(self.headers.getheader('content-length'))
        field_data = self.rfile.read(length)
        esc_string = "[[-*-]]"
        field_data = field_data.replace(";",esc_string)
        args = parse_qs(field_data)
        self.user = "-"
        self.retval = "-"
        self.code = -1
        
        if parsed_path.path == "/checkpwd":
            message = ''
            if 'u' in args and 'p' in args:
                user = unquote(args['u'][0].replace(esc_string,";"))
                self.user = user
                password = unquote(args['p'][0].replace(esc_string,";")) #.decode('utf8')
                (isGood,code,reason) = self.verifyPasswordGood(user.encode('utf8'),password.encode('utf8'),False)
                message += u','.join(map(unicode,[isGood,code,reason]))
            self.send_response(200)
            self.end_headers()
            self.wfile.write(message)
        else:
            self.send_response(301)
            self.send_header('Location', 'https://www.youtube.com/watch?v=dQw4w9WgXcQ')
            self.end_headers()
        return 
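The esc_string replacement above presumably guards against raw semicolons in the submitted values: Python 2's urlparse.parse_qs treats ';' as a separator just like '&', so an unescaped semicolon would truncate a password. A minimal sketch of the behaviour being worked around (made-up credentials):

from urlparse import parse_qs

parse_qs('u=alice&p=pa;ss')
# {'u': ['alice'], 'p': ['pa']}  -- 'ss' becomes a separate, valueless field and is lost

esc_string = '[[-*-]]'
args = parse_qs('u=alice&p=pa;ss'.replace(';', esc_string))
args['p'][0].replace(esc_string, ';')
# 'pa;ss'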
Example 25
Project: bitpay-brick   Author: javgh   File: bitpayprovider.py    License: MIT License
def get_bitcoin_uri_with_bluetooth_address(self, bluetooth_address):
        (scheme, netloc, path, query, fragment) = \
                urlparse.urlsplit(self.bitcoin_uri)
        query_parts = urlparse.parse_qs(query)
        query_parts['amount'] = float(query_parts['amount'][0])
        query_parts['r'] = 'bt:%s' % bluetooth_address.replace(':', '')
        query = urllib.urlencode(query_parts)
        return urlparse.urlunsplit((scheme, netloc, path, query, fragment)) 
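A minimal sketch of the query rewrite this method performs, with a made-up invoice query string:

import urlparse, urllib

query = 'amount=0.001000&r=https%3A%2F%2Fbitpay.com%2Fi%2Fabc'
query_parts = urlparse.parse_qs(query)
# {'amount': ['0.001000'], 'r': ['https://bitpay.com/i/abc']}
query_parts['amount'] = float(query_parts['amount'][0])
query_parts['r'] = 'bt:%s' % 'AA:BB:CC:DD:EE:FF'.replace(':', '')
urllib.urlencode(query_parts)
# e.g. 'amount=0.001&r=bt%3AAABBCCDDEEFF' (key order may vary)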
Example 26
Project: crackdb   Author: kryc   File: webserver.py    License: GNU General Public License v3.0
def parse_POST(self):
        '''
        Utility function to pull out the post variables
        from the HTTP body
        '''
        ctype, pdict = parse_header(self.headers['content-type'])
        if ctype == 'multipart/form-data':
            postvars = parse_multipart(self.rfile, pdict)
        elif ctype == 'application/x-www-form-urlencoded':
            length = int(self.headers['content-length'])
            postvars = parse_qs(self.rfile.read(length), 
                                keep_blank_values=1)
        else:
            postvars = {}
        return postvars 
Example 27
Project: jawfish   Author: war-and-code   File: cgi.py    License: MIT License
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument."""
    warn("cgi.parse_qs is deprecated, use urlparse.parse_qs instead",
         PendingDeprecationWarning, 2)
    return urlparse.parse_qs(qs, keep_blank_values, strict_parsing) 
Example 28
Project: youku   Author: hanguokai   File: util.py    License: Apache License 2.0
def check_error(response, expect_status=200):
    """
    Youku error should return as json response, like:
    HTTP 400
    {
        "error":{
            "code":120010223,
            "type":"UploadsException",
            "description":"Expired upload token"
        }
    }

    But error also maybe in response url params or response body.

    Content-Type maybe application/json or text/plain.

    Args:
        expect_status: normally is 200 or 201
    """
    json = None
    try:
        json = response.json()
    except:
        pass
    if (response.status_code != expect_status or
            response.status_code == 400 or
            'error' in json):
        if json:
            error = json['error']
            raise YoukuError(error.get('code', ''), error.get('type', ''),
                             error.get('description', ''), response.status_code)
        else:
            # try to parse error from body
            error = parse_qs(response.text)
            raise YoukuError(error.get('code', [None])[0],
                             error.get('type', [None])[0],
                             error.get('description', [None])[0],
                             response.status_code) 
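When the error arrives as a plain-text body instead of JSON, the fallback branch reads the fields out of a urlencoded string. A minimal sketch with an illustrative error body:

error = parse_qs('code=120010223&type=UploadsException&description=Expired+upload+token')
# {'code': ['120010223'], 'type': ['UploadsException'], 'description': ['Expired upload token']}
error.get('code', [None])[0]     # '120010223'
error.get('missing', [None])[0]  # None -- the [None] default keeps the lookup safe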
Example 29
Project: Pancas   Author: Sup3Roque   File: genesisresolvers.py    License: GNU General Public License v2.0
def resolve(self, url):
        try:
            if 'picasaweb' in url.lower():
                result = getUrl(url).result
                aid = re.compile('aid=(\d*)').findall(result)[0]

                pid = urlparse.urlparse(url).fragment
                oid = re.compile('/(\d*)/').findall(urlparse.urlparse(url).path)[0]
                key = urlparse.parse_qs(urlparse.urlparse(url).query)['authkey'][0]

                url = 'http://plus.google.com/photos/%s/albums/%s/%s?authkey=%s' % (oid, aid, pid, key)

            result = getUrl(url, mobile=True).result

            u = re.compile('"(http[s]*://.+?videoplayback[?].+?)"').findall(result)[::-1]
            u = [i.replace('\\u003d','=').replace('\\u0026','&') for i in u]
            u = sum([self.tag(i) for i in u], [])

            url = []
            try: url += [[i for i in u if i['quality'] == '1080p'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'HD'][0]]
            except: pass
            try: url += [[i for i in u if i['quality'] == 'SD'][0]]
            except: pass

            if url == []: return
            return url
        except:
            return 
Example 30
Project: Pancas   Author: Sup3Roque   File: genesisresolvers.py    License: GNU General Public License v2.0
def resolve(self, url):
        try:
            referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
            page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')

            result = getUrl(url, referer=referer).result
            result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
            result = unwise().worker(result)

            strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
            strm = [i for i in strm if i.startswith('rtmp')][0]
            url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
            return url
        except:
            return 
Example 31
Project: Pancas   Author: Sup3Roque   File: okru.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        purged_jsonvars = {}
        lines = []
        best = '0'
        #referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        #(?://|\.)(ok.ru|odnoklassniki.ru)/(?:videoembed|video)/(.+)
        media_id = re.compile('(?://|\.)(ok.ru|odnoklassniki.ru)/(?:videoembed|video)/(.+)').findall(url)[0][1]
        vids = __get_Metadata(media_id)
        #control.log('saaa %s ' % vids)

        for entry in vids['urls']:
            quality = __replaceQuality(entry['name'])
            lines.append(quality)
            purged_jsonvars[quality] = entry['url'] + '|' + urllib.urlencode(header)
            if int(quality) > int(best): best = quality

        if len(lines) == 1:
            return purged_jsonvars[lines[0]].encode('utf-8')
        else:
            return purged_jsonvars[str(best)].encode('utf-8')

        if result != -1:
            return purged_jsonvars[lines[result]].encode('utf-8')
        else:
            raise ResolverError('No link selected')

        raise ResolverError('No video found')
#        swf = re.compile('src\s*=[\'|\"](.+?player.+?\.js)[\'|\"]').findall(result)[0]

    except:
        return 
Example 32
Project: Pancas   Author: Sup3Roque   File: vk.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        try: oid, id = urlparse.parse_qs(urlparse.urlparse(url).query)['oid'][0] , urlparse.parse_qs(urlparse.urlparse(url).query)['id'][0]
        except: oid, id = re.compile('\/video(.*)_(.*)').findall(url)[0]
        try: hash = urlparse.parse_qs(urlparse.urlparse(url).query)['hash'][0]
        except: hash = _hash(oid, id)

        u = 'http://api.vk.com/method/video.getEmbed?oid=%s&video_id=%s&embed_hash=%s' % (oid, id, hash)
 
        result = client.request(u)
        result = re.sub(r'[^\x00-\x7F]+',' ', result)

        try: result = json.loads(result)['response']
        except: result = _private(oid, id)

        url = []
        try: url += [{'quality': 'HD', 'url': result['url720']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url540']}]
        except: pass
        try: url += [{'quality': 'SD', 'url': result['url480']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url360']}]
        except: pass
        if not url == []: return url
        try: url += [{'quality': 'SD', 'url': result['url240']}]
        except: pass

        if not url == []: return url

    except:
        return 
Example 33
Project: Pancas   Author: Sup3Roque   File: mybeststream.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        page = url.replace(referer, '').replace('&referer=', '').replace('referer=', '')

        result = client.request(url, referer=referer)
        result = re.compile("}[(]('.+?' *, *'.+?' *, *'.+?' *, *'.+?')[)]").findall(result)[-1]
        result = unwise.execute(result)

        strm = re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
        strm = [i for i in strm if i.startswith('rtmp')][0]
        url = '%s pageUrl=%s live=1 timeout=10' % (strm, page)
        return url
    except:
        return 
Example 34
Project: Pancas   Author: Sup3Roque   File: icefilms_mv_tv_null.py    License: GNU General Public License v2.0
def resolve(self, url):
        try:
            post = urlparse.parse_qsl(urlparse.urlparse(url).query, True)
            post = [i for i in post if not i[0] == 'image']
            post = post

            query = urlparse.parse_qs(urlparse.urlparse(url).query)

            image = urllib.unquote(query['image'][0])

            ref = self.video_link % query['t'][0]

            url = urlparse.urlparse(url).path
            url += '?s=%s&t=%s&app_id=Specto505' % (query['id'][0], query['t'][0])

            links = [self.link_1]
            for base_link in links:
                referer = urlparse.urljoin(base_link, ref)
                cookie = client.request(referer, output='cookie')
                result = client.request(urlparse.urljoin(base_link, url), post=post, referer=referer, cookie=cookie)
                if 'com_iceplayer' in str(result): break

            self.img_parser(image, referer)

            url = urlparse.parse_qs(urlparse.urlparse(result).query)['url'][0]
            url = resolvers.request(url)

            return url
        except:
            return 
Example 35
Project: Pancas   Author: Sup3Roque   File: muchmovies_mv_tv.py    License: GNU General Public License v2.0
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
        try:
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            t = cleantitle.get(data['tvshowtitle'])
            print('###',t,data['tvshowtitle'])
            year = re.findall('(\d{4})', date)[0]
            years = [str(year), str(int(year)+1), str(int(year)-1)]
            season = '%01d' % int(season)
            episode = '%01d' % int(episode)

            headers = {'X-Requested-With': 'XMLHttpRequest'}
            query = urllib.urlencode({'keyword': '%s - Season %s' % (data['tvshowtitle'], season)})
            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url, post=query, headers=headers)
            r = json.loads(r)['content']
            print('>>>',r)

            r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'ss-title'}), client.parseDOM(r, 'a', attrs = {'class': 'ss-title'}))
            r = [(i[0], re.findall('(.+?) - season (\d+)$', i[1].lower())) for i in r]
            r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
            r = [i for i in r if t == cleantitle.get(i[1])]
            r = [i[0] for i in r if season == '%01d' % int(i[2])][:2]
            r = [(i, re.findall('(\d+)', i)[-1]) for i in r]
            print('>>>',r)

            for i in r:
                try:
                    y, q = cache.get(self.muchmovies_info, 9000, i[1])
                    if not y in years: raise Exception()
                    return urlparse.urlparse(i[0]).path + '?episode=%01d' % int(episode)
                except:
                    pass
        except:
            return 
Example 36
Project: Pancas   Author: Sup3Roque   File: iwatchonline_mv_tv.py    License: GNU General Public License v2.0
def get_movie(self, imdb, title, year):
        try:
            query = self.search_link
            post = {'searchquery': title, 'searchin': '1'}
            post = urllib.urlencode(post)


            result = ''
            links = [self.link_1, self.link_3]
            for base_link in links:
                headers = {"Content-Type":"application/x-www-form-urlencoded", "Referer":urlparse.urljoin(base_link, query)}
                result = client.request(urlparse.urljoin(base_link, query), post=post, headers=headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result, 'div', attrs = {'class': 'widget search-page'})[0]
            result = client.parseDOM(result, 'td')

            title = cleantitle.movie(title)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
            result = [i for i in result if title == cleantitle.movie(i[1])]
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]

            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return 
Example 37
Project: Pancas   Author: Sup3Roque   File: iwatchonline_mv_tv.py    License: GNU General Public License v2.0
def get_show(self, imdb, tvdb, tvshowtitle, year):

        try:
            query = self.search_link
            post = {'searchquery': tvshowtitle, 'searchin': '2'}
            post = urllib.urlencode(post)

            result = ''
            links = [self.link_3]
            for base_link in links:
                headers = {'Referer': base_link+ "/advance-search"}

                result = client.request(urlparse.urljoin(base_link, query), post=post, headers=headers)
                if 'widget search-page' in str(result): break

            result = client.parseDOM(result, 'div', attrs = {'class': 'widget search-page'})[0]
            result = client.parseDOM(result, 'td')


            tvshowtitle = cleantitle.tv(tvshowtitle)
            years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
            result = [(client.parseDOM(i, 'a', ret='href')[-1], client.parseDOM(i, 'a')[-1]) for i in result]
            result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
            print "--!--"
            print result
            result = [i[0] for i in result if any(x in i[1] for x in years)][0]
            url = client.replaceHTMLCodes(result)
            try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
            except: pass
            url = urlparse.urlparse(url).path
            url = url.encode('utf-8')
            return url
        except:
            return 
Example 38
Project: Pancas   Author: Sup3Roque   File: sawlive.py    License: GNU General Public License v2.0
def resolve(url):
    try:
        page = re.compile('//.+?/(?:embed|v)/([0-9a-zA-Z-_]+)').findall(url)[0]
        page = 'http://sawlive.tv/embed/%s' % page

        try: referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
        except: referer = page

        result = client.request(page, referer=referer)

        unpacked = ''
        packed = result.split('\n')
        for i in packed: 
            try: unpacked += jsunpack.unpack(i)
            except: pass
        result += unpacked
        result = urllib.unquote_plus(result)

        result = re.compile('<iframe(.+?)</iframe>').findall(result)[-1]

        url = re.compile('src\s*=\s*[\'|\"](.+?)[\'|\"].+?[\'|\"](.+?)[\'|\"]').findall(result)[0]
        url = '/'.join(url)

        result = client.request(url, referer=referer)

        strm = re.compile("'streamer'.+?'(.+?)'").findall(result)[0]
        file = re.compile("'file'.+?'(.+?)'").findall(result)[0]
        swf = re.compile("SWFObject\('(.+?)'").findall(result)[0]

        url = '%s playpath=%s swfUrl=%s pageUrl=%s live=1 timeout=30' % (strm, file, swf, url)
        return url
    except:
        return 
Example 39
Project: Pancas   Author: Sup3Roque   File: fmovies_mv_tv.py    License: GNU General Public License v2.0
def get_episode(self, url, imdb, tvdb, title, date, season, episode):

        try:
            if url == None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'],  url['season'], url['episode'] = title, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return 