Python urllib.request.quote() Examples

The following are 30 code examples of urllib.request.quote(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also want to check out all available functions/classes of the module urllib.request, or try the search function.
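As a quick orientation before the project examples: urllib.request.quote is the same function as urllib.parse.quote. It percent-encodes a string (using UTF-8 by default) so it can be embedded safely in a URL, and the safe parameter controls which characters are left unescaped. A minimal standalone sketch:

from urllib.parse import quote

# Spaces, commas and non-ASCII characters are percent-encoded as UTF-8 bytes.
quote("São Paulo, Brazil")      # 'S%C3%A3o%20Paulo%2C%20Brazil'

# safe='/' is the default, so path separators survive; widen it when needed.
quote("/path/to my file")       # '/path/to%20my%20file'
quote("a&b=c")                  # 'a%26b%3Dc'
quote("a&b=c", safe="&=")       # 'a&b=c'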
Example #1
Source File: gaode_map.py    From data_analysis with Apache License 2.0 6 votes
def getlnglat(address):
    """
    Get the latitude and longitude of a Chinese-language address (lat: latitude, lng: longitude)
    """
    url_base = "http://api.map.baidu.com/geocoder/v2/"
    output = "json"
    ak = "" # browser-side API key
    address = quote(address) # the address is Chinese text, so percent-encode it first to avoid garbled characters
    url = url_base + '?' + 'address=' + address  + '&output=' + output + '&ak=' + ak 
    lat = 0.0
    lng = 0.0
    res = requests.get(url)
    temp = json.loads(res.text)
    if temp["status"] == 0:
        lat = temp['result']['location']['lat']
        lng = temp['result']['location']['lng']
    return lat,lng


# used to display Chinese labels correctly
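For reference, quote() encodes non-ASCII text as UTF-8 by default, which is exactly what the comment in Example #1 relies on for the Chinese address. A minimal sketch (the district name below is only an illustration); note that requests could also build the query string itself via its params argument:

from urllib.parse import quote

addr = "北京市海淀区"
quote(addr)   # '%E5%8C%97%E4%BA%AC%E5%B8%82%E6%B5%B7%E6%B7%80%E5%8C%BA'

# Equivalent with requests handling the encoding (ak as in Example #1):
# requests.get("http://api.map.baidu.com/geocoder/v2/",
#              params={"address": addr, "output": "json", "ak": ak})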
Example #2
Source File: google.py    From pyconjpbot with MIT License 6 votes
def google_image(message, keywords):
    """
    Return the result of a Google image search

    https://github.com/llimllib/limbo/blob/master/limbo/plugins/image.py
    """

    query = quote(keywords)
    searchurl = "https://www.google.com/search?tbm=isch&q={0}".format(query)

    # this is an old iphone user agent. Seems to make google return good results.
    useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"

    result = requests.get(searchurl, headers={"User-agent": useragent}).text
    images = list(map(unescape, re.findall(r"var u='(.*?)'", result)))

    if images:
        botsend(message, choice(images))
    else:
        botsend(message, "`{}` での検索結果はありませんでした".format(keywords)) 
Example #3
Source File: google.py    From pyconjpbot with MIT License 6 votes
def google_map(message, keywords):
    """
    Return the result of a Google Maps search

    https://github.com/llimllib/limbo/blob/master/limbo/plugins/map.py
    """
    query = quote(keywords)

    # Slack seems to ignore the size param
    #
    # To get google to auto-reasonably-zoom its map, you have to use a marker
    # instead of using a "center" parameter. I found that setting it to tiny
    # and grey makes it the least visible.
    url = "https://maps.googleapis.com/maps/api/staticmap?size=800x400&markers={0}&maptype={1}"
    url = url.format(query, 'roadmap')

    botsend(message, url)
    attachments = [{
        'pretext': '<http://maps.google.com/maps?q={}|大きい地図で見る>'.format(query),
        'mrkdwn_in': ["pretext"],
    }]
    botwebapi(message, attachments) 
Example #4
Source File: Function.py    From pc-protector-moe with GNU General Public License v3.0 6 votes
def rename(self, ids, new_name):
        """
        Function: rename
        Returns: dict
        """
        try:
            arg = self.str_arg(ids=ids, new_name=new_name)
            url = self.server + 'boat/renameShip/{ids}/{new_name}/'.format(**arg) + self.get_url_end()
            url = quote(url, safe=";/?:@&=+$,", encoding="utf-8")
            data = self.Mdecompress(url)
            data = json.loads(data)
            error_find(data)
            if is_write and os.path.exists('requestsData'):
                with open('requestsData/rename.json', 'w') as f:
                    f.write(json.dumps(data))
            return data
        except HmError as e:
            print('Rename FAILED! Reason:', e.message)
            raise
        except Exception as Error_information:
            print('Rename FAILED! Reason:', Error_information)
            raise 
Example #5
Source File: stock.py    From limbo with MIT License 6 votes
def stockprice(ticker):
    url = "https://www.google.com/finance?q={0}"
    soup = BeautifulSoup(
        requests.get(url.format(quote(ticker))).text, "html5lib")

    try:
        company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text,
                                     re.M)[0]
    except IndexError:
        logging.info("Unable to find stock {0}".format(ticker))
        return ""
    price = soup.select("#price-panel .pr span")[0].text
    change, pct = soup.select("#price-panel .nwp span")[0].text.split()
    time = " ".join(soup.select(".mdata-dis")[0].parent.text.split()[:4])
    pct = pct.strip('()')  # strip() returns a new string; assign the result back

    emoji = ":chart_with_upwards_trend:" if change.startswith("+") \
            else ":chart_with_downwards_trend:"

    return "{0} {1} {2}: {3} {4} {5} {6} {7}".format(
        emoji, company, ticker, price, change, pct, time, emoji) 
Example #6
Source File: image.py    From limbo with MIT License 6 votes
def image(search, unsafe=False):
    """given a search string, return a image URL via google search"""
    searchb = quote(search.encode("utf8"))

    safe = "&safe=" if unsafe else "&safe=active"
    searchurl = "https://www.google.com/search?tbm=isch&q={0}{1}".format(
        searchb, safe)

    # this is an old iphone user agent. Seems to make google return good results.
    useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us)" \
        " AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"

    result = requests.get(searchurl, headers={"User-agent": useragent}).text

    images = list(map(unescape, re.findall(r"imgres[?]imgurl=(.*?)&amp;", result)))
    shuffle(images)

    if images:
        return images[0]
    return "" 
Example #7
Source File: gif.py    From limbo with MIT License 6 votes
def gif(search, unsafe=False):
    """given a search string, return a gif URL via google search"""
    searchb = quote(search.encode("utf8"))

    safe = "&safe=" if unsafe else "&safe=active"
    searchurl = "https://www.google.com/search?tbs=itp:animated&tbm=isch&q={0}{1}" \
        .format(searchb, safe)

    # this is an old iphone user agent. Seems to make google return good results.
    useragent = "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us)" \
        " AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/6531.22.7"

    result = requests.get(searchurl, headers={"User-agent": useragent}).text

    gifs = list(map(unescape, re.findall(r"imgres[?]imgurl=(.*?)&amp;", result)))
    shuffle(gifs)

    if gifs:
        return gifs[0]
    return "" 
Example #8
Source File: server.py    From squeeze-alexa with GNU General Public License v3.0 5 votes
def play_genres(self, genre_list, player_id=None):
        """Adds then plays a random mix of albums of specified genres"""
        gs = genre_list or []
        commands = (["playlist clear", "playlist shuffle 1"] +
                    ["playlist addalbum %s * *" % urllib.quote(genre)
                     for genre in gs if genre] +
                    ["play 2"])
        pid = player_id or self.cur_player_id
        return self._request(["%s %s" % (pid, com) for com in commands]) 
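The method above simply assembles a flat list of CLI commands, one addalbum per non-empty genre, with each genre percent-encoded (the snippet uses the Python 2 spelling urllib.quote; in Python 3 the same function is urllib.parse.quote). A rough trace with two hypothetical genres:

from urllib.parse import quote  # urllib.quote in the Python 2 snippet above

genres = ["Jazz", "Hard Rock"]
commands = (["playlist clear", "playlist shuffle 1"] +
            ["playlist addalbum %s * *" % quote(g) for g in genres if g] +
            ["play 2"])
# ['playlist clear', 'playlist shuffle 1',
#  'playlist addalbum Jazz * *', 'playlist addalbum Hard%20Rock * *',
#  'play 2']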
Example #9
Source File: base.py    From FastWordQuery with GNU General Public License v3.0 5 votes
def quote_word(self):
        return urllib2.quote(self.word) 
Example #10
Source File: eudict.py    From FastWordQuery with GNU General Public License v3.0 5 votes
def fld_sound(self):
        url = 'https://api.frdic.com/api/v2/speech/speakweb?langid=es&txt=QYN{word}'.format(
            word=urllib2.quote(base64.b64encode(self.word.encode('utf-8'))))
        filename = get_hex_name(self.unique.lower(), url, 'mp3')
        if os.path.exists(filename) or self.net_download(filename, url):
            return self.get_anki_label(filename, 'audio')
        return '' 
Example #11
Source File: frdic.py    From FastWordQuery with GNU General Public License v3.0 5 votes
def fld_sound(self):
        url = 'https://api.frdic.com/api/v2/speech/speakweb?langid=fr&txt=QYN{word}'.format(
            word=urllib2.quote(base64.b64encode(self.word.encode('utf-8')))
        )
        filename = get_hex_name(self.unique.lower(), url, 'mp3')
        if os.path.exists(filename) or self.net_download(filename, url):
                return self.get_anki_label(filename, 'audio')
        return '' 
Example #12
Source File: esdict.py    From FastWordQuery with GNU General Public License v3.0 5 votes
def fld_sound(self):
        url = 'https://api.frdic.com/api/v2/speech/speakweb?langid=es&txt=QYN{word}'.format(
            word=urllib2.quote(base64.b64encode(self.word.encode('utf-8')))
        )
        filename = get_hex_name(self.unique.lower(), url, 'mp3')
        if os.path.exists(filename) or self.net_download(filename, url):
                return self.get_anki_label(filename, 'audio')
        return '' 
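Examples #10 through #12 share the same encoding chain: the word is UTF-8 encoded, base64-encoded, and then percent-encoded, because base64 output can contain '+' and '=' characters that would otherwise be misread inside a query string (urllib2.quote is the Python 2 spelling; urllib.parse.quote behaves the same way). A minimal sketch:

import base64
from urllib.parse import quote

word = "café"
encoded = base64.b64encode(word.encode("utf-8"))   # b'Y2Fmw6k='
quote(encoded)                                     # 'Y2Fmw6k%3D' -- the '=' padding gets escaped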
Example #13
Source File: oxford.py    From FastWordQuery with GNU General Public License v3.0 5 votes
def _get_from_api(self, lang='en'):
        app_id = '45aecf84'
        app_key = 'bb36fd6a1259e5baf8df6110a2f7fc8f'
        headers = {'app_id': app_id, 'app_key': app_key}
        word_id = urllib2.quote(self.word.lower().replace(' ', '_'))
        url = u'https://od-api.oxforddictionaries.com/api/v1/entries/' + lang + u'/' + word_id
        result = {'lexicalEntries': ''}
        try:
            result.update(json.loads(self.get_response(url, headers=headers, timeout=10))['results'][0])
        except:
            pass
        return self.cache_this(result) 
Example #14
Source File: get_city_data.py    From algorithms-in-python with MIT License 5 votes
def get_city_distance(city_from, city_to):
    url = MAPS_API_TEMPLATE.format(quote(city_from), quote(city_to))
    raw_response = urlopen(url).read().decode(encoding='UTF-8').replace('\n','')
    parsed_response = json.loads(raw_response)
    distance_in_meters = parsed_response['rows'][0]['elements'][0]['distance']['value']
    return distance_in_meters 
Example #15
Source File: ariyana.py    From spntaBot with GNU General Public License v3.0 5 votes
def run(message, matches, chat_id, step):
    if matches[0] == 'voice':
        text = ur.quote(matches[1])
        url = 'http://api.farsireader.com/ArianaCloudService/ReadTextGET?APIKey=demo&Text={}&Speaker=Female1' \
              '&Format=mp3%2F32%2Fm&GainLevel=3&PitchLevel=4&PunctuationLevel=2&SpeechSpeedLevel=5' \
              '&ToneLevel=10'.format(text)
        ur.urlretrieve(url, 'tmp/{}.mp3'.format(message['from']['id']))
        bot.sendVoice(chat_id, open('tmp/{}.mp3'.format(message['from']['id']), 'rb'), caption='@spntaBot')
        os.remove('tmp/{}.mp3'.format(message['from']['id'])) 
Example #16
Source File: spider.py    From etlpy with Apache License 2.0 5 votes
def iriToUri(iri):
    # Percent-encode only the query component (index 4 of the 6-tuple returned
    # by urlparse); all other components are passed through unchanged.
    parts = urlparse(iri)
    res = [quote(part) if index == 4 else part
           for index, part in enumerate(parts)]
    return urlunparse(res)
Example #17
Source File: server.py    From squeeze-alexa with GNU General Public License v3.0 5 votes
def playlist_play(self, path, player_id=None):
        """Play song / playlist immediately"""
        self.player_request("playlist play %s" % (urllib.quote(path)),
                            player_id=player_id) 
Example #18
Source File: server.py    From squeeze-alexa with GNU General Public License v3.0 5 votes
def playlist_resume(self, name, resume=True, wipe=False, player_id=None):
        cmd = ("playlist resume %s noplay:%d wipePlaylist:%d"
               % (urllib.quote(name), int(not resume), int(wipe)))
        self.player_request(cmd, wait=False, player_id=player_id) 
Example #19
Source File: generate_heatmap.py    From music163-spiders with MIT License 5 votes
def getlnglat(address):
    url = "http://api.map.baidu.com/geocoder/v2/"
    output = 'json'
    # the API key must be requested from the Baidu developer platform
    ak = 'put your own API key here'
    addr = quote(address)
    uri = url + '?' + 'address=' + addr + '&output=' + output + '&ak=' + ak
    req = urlopen(uri)
    res = req.read().decode()
    temp = json.loads(res)
    return temp


# generate the heat map
Example #20
Source File: oxford.py    From WordQuery with GNU General Public License v3.0 5 votes
def _get_from_api(self, lang="en"):
        word = self.word
        baseurl = "https://od-api.oxforddictionaries.com/api/v1"
        app_id = "45aecf84"
        app_key = "bb36fd6a1259e5baf8df6110a2f7fc8f"
        headers = {"app_id": app_id, "app_key": app_key}

        word_id = urllib2.quote(word.lower().replace(" ", "_"))
        url = baseurl + "/entries/" + lang + "/" + word_id
        url = urllib2.Request(url, headers=headers)
        try:
            return json.loads(urllib2.urlopen(url).read())
        except:
            pass 
Example #21
Source File: userdata.py    From peach with Mozilla Public License 2.0 5 votes
def get_object(bucket, obj, file_path):
    if file_path.endswith("/"):
        return
    in_file = urlopen('http://%s.s3.amazonaws.com/%s' % (bucket, quote(obj.key)))
    size = int(in_file.info()['content-length'])
    assert size == obj.size
    folder = os.path.dirname(file_path)
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(file_path, 'wb') as out_file:
        while size > 0:
            buf = in_file.read(min(size, 64 * 1024))
            out_file.write(buf)
            size -= len(buf)
    os.utime(file_path, (obj.date, obj.date)) 
Example #22
Source File: search_engine.py    From dialogbot with Apache License 2.0 5 votes
def search_bing(query):
        """
        Look up an answer via Bing, including the Bing knowledge graph and Bing Wangdian (Bing's encyclopedia)
        :param query:
        :return: list, string
        """
        answer = []
        left_text = ''
        # fetch the Bing results page
        soup_bing = html_crawler.get_html_bing(bing_url_prefix + quote(query))
        # check whether the query hits Bing's knowledge graph
        r = soup_bing.find(class_="bm_box")

        if r:
            r = r.find_all(class_="b_vList")
            if r and len(r) > 1:
                r = r[1].find("li").get_text().strip()
                if r:
                    answer.append(r)
                    logger.debug("Bing知识图谱找到答案")
                    return answer, left_text
        else:
            r = soup_bing.find(id="b_results")
            if r:
                bing_list = r.find_all('li')
                for bl in bing_list:
                    temp = bl.get_text()
                    if temp.__contains__(" - 必应网典"):
                        logger.debug("查找Bing网典")
                        url = bl.find("h2").find("a")['href']
                        if url:
                            bingwd_soup = html_crawler.get_html_bingwd(url)
                            r = bingwd_soup.find(class_='bk_card_desc').find("p")
                            if r:
                                r = r.get_text().replace("\n", "").strip()
                                if r:
                                    logger.debug("Bing网典找到答案")
                                    answer.append(r)
                                    return answer, left_text
                left_text += r.get_text()
        return answer, left_text 
Example #23
Source File: stockphoto.py    From limbo with MIT License 5 votes
def stock(searchterm):
    searchterm = quote(searchterm)
    url = "https://www.shutterstock.com/search?searchterm={0}".format(
        searchterm)
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "html5lib")
    images = ["https:" + x["src"] for x in soup.select(".img-wrap img")]
    shuffle(images)

    return images[0] if images else "" 
Example #24
Source File: geocities.py    From limbo with MIT License 5 votes
def gif(searchterm):
    searchterm = quote(searchterm)
    searchurl = "https://gifcities.archive.org/api/v1/gifsearch?q={}".format(
        searchterm)
    results = requests.get(searchurl).json()
    gifs = list(
        map(lambda x: "https://web.archive.org/web/{0}".format(x['gif']),
            results))
    shuffle(gifs)

    if gifs:
        return gifs[0]
    else:
        return "" 
Example #25
Source File: map.py    From limbo with MIT License 5 votes
def makemap(query):
    querywords = []
    args = {
        "maptype": "roadmap",
    }
    for word in query.split(" "):
        if '=' in word:
            opt, val = word.split("=")
            args[opt] = val
        else:
            querywords.append(word)

    query = quote(" ".join(querywords).encode("utf8"))

    # Slack seems to ignore the size param
    #
    # To get google to auto-reasonably-zoom its map, you have to use a marker
    # instead of using a "center" parameter. I found that setting it to tiny
    # and grey makes it the least visible.
    url = "https://maps.googleapis.com/maps/api/staticmap?size=800x400&markers=size:tiny%7Ccolor:0xAAAAAA%7C{0}&maptype={1}"
    url = url.format(query, args["maptype"])

    if "zoom" in args:
        url += "&zoom={0}".format(args["zoom"])

    return url 
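A quick trace of the helper above, with a hypothetical query; the resulting URL follows mechanically from the code (shown split across two comment lines for readability):

# zoom=15 is parsed as an option, the remaining words become the marker location
makemap("moscone center zoom=15")
# 'https://maps.googleapis.com/maps/api/staticmap?size=800x400'
# '&markers=size:tiny%7Ccolor:0xAAAAAA%7Cmoscone%20center&maptype=roadmap&zoom=15'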
Example #26
Source File: calc.py    From limbo with MIT License 5 votes
def calc(eq):
    query = quote(eq)
    url = "https://encrypted.google.com/search?hl=en&q={0}".format(query)
    soup = BeautifulSoup(requests.get(url).text, "html5lib")

    answer = soup.findAll("h2", attrs={"class": "r"})
    if not answer:
        answer = soup.findAll("span", attrs={"class": "_m3b"})
        if not answer:
            return ":crying_cat_face: Sorry, google doesn't have an answer for you :crying_cat_face:"

    # They seem to use u\xa0 (non-breaking space) in place of a comma
    answer = answer[0].text.replace(u"\xa0", ",")
    return answer 
Example #27
Source File: youtube.py    From limbo with MIT License 5 votes
def youtube(searchterm):
    url = "https://www.youtube.com/results?search_query={0}"
    url = url.format(quote(searchterm))

    r = requests.get(url)
    results = re.findall('a href="(/watch[^&]*?)"', r.text)

    if not results:
        return "sorry, no videos found"

    return "https://www.youtube.com{0}".format(results[0]) 
Example #28
Source File: google.py    From limbo with MIT License 5 votes
def google(q):
    query = quote(q)
    url = "https://encrypted.google.com/search?q={0}".format(query)
    soup = BeautifulSoup(requests.get(url).text, "html5lib")

    answer = soup.findAll("h3", attrs={"class": "r"})
    if not answer:
        return ":crying_cat_face: Sorry, google doesn't have an answer for you :crying_cat_face:"

    try:
        return unquote(re.findall(r"q=(.*?)&", str(answer[0]))[0])
    except IndexError:
        # in this case there is a first answer without a link, which is a
        # google response! Let's grab it and display it to the user.
        return ' '.join(answer[0].stripped_strings) 
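This example (and Example #29 below) also uses the inverse function, unquote, to decode the percent-encoded q= parameter of a Google result link back into a readable URL; in these projects it is imported from urllib.parse alongside quote. A minimal sketch:

from urllib.parse import unquote

unquote("https%3A//docs.python.org/3/library/urllib.parse.html")
# 'https://docs.python.org/3/library/urllib.parse.html'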
Example #29
Source File: google.py    From pyconjpbot with MIT License 5 votes
def google(message, keywords):
    """
    Return the result of a Google search

    https://github.com/llimllib/limbo/blob/master/limbo/plugins/google.py
    """

    if keywords == 'help':
        return

    query = quote(keywords)
    url = "https://encrypted.google.com/search?q={0}".format(query)
    soup = BeautifulSoup(requests.get(url).text, "html.parser")

    answer = soup.findAll("h3", attrs={"class": "r"})
    if not answer:
        botsend(message, "`{}` での検索結果はありませんでした".format(keywords))
        return  # without this, answer[0] below would raise an unhandled IndexError

    try:
        _, url = answer[0].a['href'].split('=', 1)
        url, _ = url.split('&', 1)
        botsend(message, unquote(url))
    except IndexError:
        # in this case there is a first answer without a link, which is a
        # google response! Let's grab it and display it to the user.
        return ' '.join(answer[0].stripped_strings) 
Example #30
Source File: getgov.py    From addons-source with GNU General Public License v2.0 4 votes
def __get_place(self, gov_id, type_dic, preferred_lang):
        gov_url = 'http://gov.genealogy.net/semanticWeb/about/' + quote(gov_id)

        response = urlopen(gov_url)
        data = response.read()

        dom = parseString(data)
        top = dom.getElementsByTagName('gov:GovObject')

        place = Place()
        place.gramps_id = gov_id
        if not len(top):
            return place, []

        count = 0
        for element in top[0].getElementsByTagName('gov:hasName'):
            count += 1
            place_name = self.__get_hasname(element)
            if count == 1:
                place.set_name(place_name)
            else:
                if place_name.lang == preferred_lang:
                    place.add_alternative_name(place.get_name())
                    place.set_name(place_name)
                else:
                    place.add_alternative_name(place_name)
        for element in top[0].getElementsByTagName('gov:hasType'):
            curr_lang = place.get_name().get_language()
            place_type = self.__get_hastype(element, curr_lang, type_dic, preferred_lang)
            place.set_type(place_type)
        for element in top[0].getElementsByTagName('gov:position'):
            latitude, longitude = self.__get_position(element)
            place.set_latitude(latitude)
            place.set_longitude(longitude)
        ref_list = []
        for element in top[0].getElementsByTagName('gov:isPartOf'):
            ref, date = self.__get_ispartof(element)
            ref_list.append((ref, date))
        for element in top[0].getElementsByTagName('gov:hasURL'):
            url = self.__get_hasurl(element)
            place.add_url(url)

        return place, ref_list