Python urllib2.quote() Examples

The following are 30 code examples of urllib2.quote(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the link above each example. You may also want to check out all available functions and classes of the urllib2 module, or try the search function.
Example #1
Source File: addon.py From plugin.video.xunleicloud with GNU General Public License v2.0 | 6 votes |
def getcloudvideourl(bturl, title):
    ''' resolve video url '''
    # Endpoint on the Xunlei cloud service that resolves a task URL into
    # playable video URLs.
    dlpre = '{0}/req_get_method_vod'.format(cloudurlpre)
    # dl = '{0}/vod_dl_all?userid={1}&gcid={2}&filename={3}&t={4}'.format(
    #     cloudurlpre, xl.userid, gcid,
    #     urllib2.quote(title), cachetime)
    # NOTE(review): `title` is only referenced by the commented-out legacy
    # request above; the live code path ignores it.
    dl = '{0}?url={1}&platform=0&userid={2}&sessionid={3}&cache={4}'.format(
        dlpre, urllib.quote(bturl), xl.userid, xl.sid, cachetime)
    rsp = xl.urlopen(dl)
    vturls = json.loads(rsp)
    # Map known spec_id codes to human-readable quality labels.
    typ = {'356608': '1080P', '282880': '720P', '225536': '标清'}
    # Keep only entries whose spec_id we can label, pairing label with URL.
    vtyps = [(typ[str(i['spec_id'])], i['vod_url_dt17'])
             for i in vturls['resp']['vodinfo_list']
             if str(i['spec_id']) in typ]
    # vtyps = [(typ[k], v['url'])
    #          for (k, v) in vturls.iteritems() if 'url' in v]
    # vtyps.insert(0, ('源码', surl))
    # selitem = dialog.select('清晰度', [v[0] for v in vtyps])
    # if selitem is -1:
    #     return
    # vtyp = vtyps[selitem]
    return vtyps
Example #2
Source File: happn.py From happn with MIT License | 6 votes |
def get_distance(self, userID): """ Fetches the distance from another user :param userID User ID of target user. """ # Create and send HTTP Get to Happn server h=headers h.update({ 'Authorization' : 'OAuth="' + self.oauth+'"', 'Content-Type' : 'application/json', }) #@TODO Trim query to just distance request query='{"fields":"spotify_tracks,modification_date,my_relations,social_synchronization.fields(facebook.fields(id),instagram.fields(pictures.fields(id),username)),school,age,clickable_profile_link,is_invited,type,gender,is_charmed,picture.fields(id,url,is_default).height(92).mode(0).width(92),last_meet_position,profiles.fields(id,url,is_default).height(1136).mode(1).width(640),has_charmed_me,job,first_name,last_invite_received,distance,availability,about,id,workplace,is_accepted"}' url = 'https://api.happn.fr/api/users/' + str(userID) + '?' + urllib2.quote(query) try: r = requests.get(url, headers=h) except Exception as e: raise HTTP_MethodError('Error Connecting to Happn Server: {}'.format(e)) if r.status_code == 200: #200 = 'OK' self.distance = r.json()['data']['distance'] logging.info('Sybil %d m from target',self.distance) else: raise HTTP_MethodError(httpErrors[r.status_code])
Example #3
Source File: splunk_rest_client.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def _get_proxy_info(context): if not context.get('proxy_hostname') or not context.get('proxy_port'): return None user_pass = '' if context.get('proxy_username') and context.get('proxy_password'): username = quote(context['proxy_username'], safe='') password = quote(context['proxy_password'], safe='') user_pass = '{user}:{password}@'.format( user=username, password=password) proxy = 'http://{user_pass}{host}:{port}'.format( user_pass=user_pass, host=context['proxy_hostname'], port=context['proxy_port']) proxies = { 'http': proxy, 'https': proxy, } return proxies
Example #4
Source File: LogglyHandler.py From squadron with MIT License | 6 votes |
def emit(self, record):
    """Ship a single log record to Loggly as a PLAINTEXT= form payload."""
    try:
        msg = record.getMessage()
        # Loggly's HTTP input takes the JSON payload URL-encoded after
        # the PLAINTEXT= prefix.
        log_data = "PLAINTEXT=" + urllib2.quote(simplejson.dumps(
            {
                'msg':msg,
                'localip':self.localip,
                'publicip':self.publicip,
                'tenant':'TODO :)'
            }
        ))
        urllib2.urlopen(self.base_url, log_data)
    except(KeyboardInterrupt, SystemExit):
        # Never swallow process-exit signals inside a log handler.
        raise
    except:
        # Standard logging-handler convention: report the failure via
        # handleError instead of raising out of a logging call.
        self.handleError(record)
Example #5
Source File: qq.py From listen1 with MIT License | 6 votes |
def search_track(keyword):
    ''' return matched qq music songs '''
    # The endpoint expects the keyword UTF-8 encoded then percent-escaped.
    keyword = urllib2.quote(keyword.encode("utf8"))
    # Bug fix: the query parameter '&notice=0' had been corrupted into
    # '¬ice=0' by HTML-entity mangling ('&not' rendered as '¬').
    url = 'http://i.y.qq.com/s.music/fcgi-bin/search_for_qq_cp?' + \
        'g_tk=938407465&uin=0&format=jsonp&inCharset=utf-8' + \
        '&outCharset=utf-8&notice=0&platform=h5&needNewCode=1' + \
        '&w=%s&zhidaqu=1&catZhida=1' + \
        '&t=0&flag=1&ie=utf-8&sem=1&aggr=0&perpage=20&n=20&p=1' + \
        '&remoteplace=txt.mqq.all&_=1459991037831&jsonpCallback=jsonp4'
    response = _qq_h(url % keyword)
    # Response is JSONP: strip the 'jsonp4(' prefix and ')' suffix.
    data = json.loads(response[len('jsonp4('):-len(')')])
    result = []
    for song in data["data"]["song"]["list"]:
        result.append(_convert_song(song))
    return result
Example #6
Source File: WYtoQQ.py From syncPlaylist with MIT License | 6 votes |
def sync_song(self):
    """Search each source-playlist song on QQ Music via the browser and add
    the first search result to the target playlist, recording successes and
    failures in self.success_list / self.failed_list."""
    for song_detail in self.source_playlist:
        song = song_detail[0]
        singer = song_detail[1]
        # Search by "title artist" to disambiguate covers.
        search_word = u"{} {}".format(song, singer)
        url_sw = quote(search_word.encode('utf8'))
        self.browser.get(qq_search_url.format(url_sw))
        # Wait until the result list has rendered before clicking anything.
        self.wait.until(lambda browser: browser.find_element_by_class_name("songlist__list"))
        sleep(0.5)

        @retry(retry_times=3)
        def _add(browser):
            # Click "add" on the first search result, then pick the target
            # playlist from the popup menu by its data-dirid attribute.
            browser.execute_script("document.getElementsByClassName('songlist__list')[0].firstElementChild.getElementsByClassName('list_menu__add')[0].click()")
            sleep(0.5)
            browser.find_element_by_css_selector("a[data-dirid='{}']".format(self.target_playlist_tag)).click()
            _print(u"song:{} success".format(song))

        try:
            _add(self.browser)
        except RetryException:
            # All retries exhausted: remember the miss and keep going.
            _print(u"song:{}, sync error".format(song))
            self.failed_list.append(search_word)
        else:
            self.success_list.append(search_word)
Example #7
Source File: qq.py From listen1 with MIT License | 6 votes |
def search_track(keyword):
    ''' return matched qq music songs '''
    # The endpoint expects the keyword UTF-8 encoded then percent-escaped.
    keyword = urllib2.quote(keyword.encode("utf8"))
    # Bug fix: the query parameter '&notice=0' had been corrupted into
    # '¬ice=0' by HTML-entity mangling ('&not' rendered as '¬').
    url = 'http://i.y.qq.com/s.music/fcgi-bin/search_for_qq_cp?' + \
        'g_tk=938407465&uin=0&format=jsonp&inCharset=utf-8' + \
        '&outCharset=utf-8&notice=0&platform=h5&needNewCode=1' + \
        '&w=%s&zhidaqu=1&catZhida=1' + \
        '&t=0&flag=1&ie=utf-8&sem=1&aggr=0&perpage=20&n=20&p=1' + \
        '&remoteplace=txt.mqq.all&_=1459991037831&jsonpCallback=jsonp4'
    response = _qq_h(url % keyword)
    # Response is JSONP: strip the 'jsonp4(' prefix and ')' suffix.
    data = json.loads(response[len('jsonp4('):-len(')')])
    result = []
    for song in data["data"]["song"]["list"]:
        result.append(_convert_song(song))
    return result
Example #8
Source File: multiThreadDownload.py From ThesaurusSpider with MIT License | 6 votes |
def downloadSingleType(bigCate,smallCate,baseDir):
    """
    Download the lexicon for one category. In practice this just rewrites the
    module-level globals so the worker threads pick up the correct download
    URL and storage directory, then enqueues the category URL.
    :param bigCate: top-level category
    :param smallCate: sub-category
    :param baseDir: download directory (must NOT end with '/')
    :return: None
    """
    global smallCateURL, downloadDir, queue, logFile
    # URL-encode both category names into the listing URL.
    smallCateURL = 'http://dict.qq.pinyin.cn/dict_list?sort1=%s&sort2=%s' %(urllib2.quote(bigCate), urllib2.quote(smallCate))
    if baseDir[-1] == '/':
        print '路径 '+baseDir+' 末尾不能有/'
        return
    downloadDir = baseDir+'/'+bigCate+'/'+smallCate
    logFile = baseDir+'/download.log'
    # Create the target directory when it does not exist yet.
    if not os.path.exists(downloadDir.decode('utf8')):
        os.makedirs(downloadDir.decode('utf8'))
    queue.put(smallCateURL)
Example #9
Source File: tracking.py From indico-plugins with MIT License | 6 votes |
def track_download_request(download_url, download_title):
    """Track a download in Piwik"""
    # Imported lazily to avoid a circular import with the plugin module.
    from indico_piwik.plugin import PiwikPlugin
    if not download_url:
        raise ValueError("download_url can't be empty")
    if not download_title:
        raise ValueError("download_title can't be empty")
    request = PiwikRequest(server_url=PiwikPlugin.settings.get('server_api_url'),
                           site_id=PiwikPlugin.settings.get('site_id_events'),
                           api_token=PiwikPlugin.settings.get('server_token'),
                           query_script=PiwikPlugin.track_script)
    # Piwik expects the tracked URL percent-encoded; it is passed both as
    # the action URL and as the download URL.
    action_url = quote(download_url)
    dt = datetime.now()
    request.call(idsite=request.site_id, rec=1,
                 action_name=quote(download_title.encode('utf-8')),
                 url=action_url, download=action_url,
                 h=dt.hour, m=dt.minute, s=dt.second)
Example #10
Source File: splunk_rest_client.py From misp42splunk with GNU Lesser General Public License v3.0 | 6 votes |
def _get_proxy_info(context):
    """Build a requests-style proxies mapping from a configuration dict.

    Returns None when no proxy hostname/port is configured; credentials,
    when both present, are percent-encoded and embedded in the proxy URL.
    """
    if not context.get('proxy_hostname') or not context.get('proxy_port'):
        return None

    user_pass = ''
    if context.get('proxy_username') and context.get('proxy_password'):
        # safe='' so ':' '@' '/' in credentials cannot break the URL.
        username = quote(context['proxy_username'], safe='')
        password = quote(context['proxy_password'], safe='')
        user_pass = '{user}:{password}@'.format(
            user=username, password=password)

    # The same http:// endpoint serves both http and https traffic.
    proxy = 'http://{user_pass}{host}:{port}'.format(
        user_pass=user_pass,
        host=context['proxy_hostname'],
        port=context['proxy_port'])
    proxies = {
        'http': proxy,
        'https': proxy,
    }
    return proxies
Example #11
Source File: oxford.py From WordQuery with GNU General Public License v3.0 | 5 votes |
def _get_from_api(self, lang="en"):
    """Look up self.word in the Oxford Dictionaries REST API.

    :param lang: dictionary language code for the entries endpoint.
    :return: parsed JSON response dict, or None on any lookup failure.
    """
    word = self.word
    baseurl = "https://od-api.oxforddictionaries.com/api/v1"
    app_id = "45aecf84"
    app_key = "bb36fd6a1259e5baf8df6110a2f7fc8f"
    headers = {"app_id": app_id, "app_key": app_key}
    # API word ids are lowercase with spaces as underscores, URL-encoded.
    word_id = urllib2.quote(word.lower().replace(" ", "_"))
    url = baseurl + "/entries/" + lang + "/" + word_id
    url = urllib2.Request(url, headers=headers)
    try:
        return json.loads(urllib2.urlopen(url).read())
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit. Keep the best-effort "return None
        # on API/network/parse errors" behavior, but let process-exit
        # exceptions propagate.
        return None
Example #12
Source File: bing.py From mmfeat with BSD 3-Clause "New" or "Revised" License | 5 votes |
def getUrl(self, query, limit, offset):
    """Return the Bing API request URL for one search page.

    The search term is wrapped in single quotes (as the Bing API expects)
    and percent-encoded before being substituted into self.format_url
    together with the page limit, offset and configured language.
    """
    encoded_term = urllib2.quote("'{}'".format(query))
    return self.format_url.format(encoded_term, limit, offset, self.lang)
Example #13
Source File: xiami.py From listen1 with MIT License | 5 votes |
def search_track(keyword):
    ''' return matched Xiami songs '''
    # The endpoint expects the keyword UTF-8 encoded then percent-escaped.
    keyword = urllib2.quote(keyword.encode("utf8"))
    search_url = 'http://api.xiami.com/web?v=2.0&app_key=1&key=' + keyword \
        + '&page=1&limit=50&_ksTS=1459930568781_153&callback=jsonp154' + \
        '&r=search/songs'
    response = _xm_h(search_url)
    # Response is JSONP: strip the 'jsonp154(' prefix and ')' suffix.
    json_string = response[len('jsonp154('):-len(')')]
    data = json.loads(json_string)
    result = []
    for song in data['data']["songs"]:
        result.append(_convert_song(song))
    return result
Example #14
Source File: _serialization.py From azure-cosmos-table-python with Apache License 2.0 | 5 votes |
def _update_request(request, x_ms_version, user_agent_string):
    """Finalize an Azure storage request in place: normalize the body,
    set Content-Length, add the standard x-ms-* / User-Agent headers,
    split any path component out of the host, and URL-quote the path."""
    # Verify body
    if request.body:
        request.body = _get_data_bytes_or_stream_only('request.body', request.body)
        length = _len_plus(request.body)

        # only scenario where this case is plausible is if the stream object is not seekable.
        if length is None:
            raise ValueError(_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM)

        # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
        if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
            request.headers['Content-Length'] = str(length)

    # append additional headers based on the service
    request.headers['x-ms-version'] = x_ms_version
    request.headers['User-Agent'] = user_agent_string
    request.headers['x-ms-client-request-id'] = str(uuid.uuid1())

    # If the host has a path component (ex local storage), move it
    path = request.host.split('/', 1)
    if len(path) == 2:
        request.host = path[0]
        request.path = '/{}{}'.format(path[1], request.path)

    # Encode and optionally add local storage prefix to path; the safe set
    # preserves characters that are significant in table/queue paths.
    request.path = url_quote(request.path, '/()$=\',~')
Example #15
Source File: tests.py From django-wham with MIT License | 5 votes |
def make_mock_response_file(url, content, output_dir, method='GET', extra_params=None):
    """Write `content` to a fixture file whose name encodes the request:
    HTTP method followed by the fully percent-encoded URL (query included)."""
    if extra_params:
        url += '?' + urlencode(extra_params)
    # safe='' so '/' and ':' in the URL are escaped too, yielding a flat
    # filename. NOTE(review): assumes output_dir ends with a path
    # separator — confirm against callers.
    path = output_dir + method + urllib2.quote(url, safe='')
    print path
    with open(path, 'w') as f:
        f.write(content)
Example #16
Source File: netease.py From listen1 with MIT License | 5 votes |
def _top_playlists(category=u'全部', order='hot', offset=0, limit=60):
    """Fetch a page of Netease top playlists for a category.

    :param category: playlist category name (default: u'全部', "all")
    :param order: sort order, e.g. 'hot'
    :param offset: paging offset; 'total' is only requested on page 1
    :param limit: page size
    :return: list of playlist dicts from the API response
    """
    # The API expects the category UTF-8 encoded then percent-escaped.
    category = urllib2.quote(category.encode("utf8"))
    action = 'http://music.163.com/api/playlist/list?cat=' + category + \
        '&order=' + order + '&offset=' + str(offset) + \
        '&total=' + ('true' if offset else 'false') + '&limit=' + str(limit)
    data = json.loads(_ne_h(action))
    return data['playlists']
Example #17
Source File: google.py From mmfeat with BSD 3-Clause "New" or "Revised" License | 5 votes |
def getUrl(self, query, offset):
    """Return the Google Custom Search request URL for one result page.

    The search term is wrapped in single quotes and percent-encoded, the
    configured search engine id is encoded as well, and the current API key
    is rotated in from self.api_keys[self.cur_api_key].
    """
    query = urllib2.quote("'{}'".format(query))
    search_id = urllib2.quote(self.search_id)
    return self.format_url.format(search_id, self.api_keys[self.cur_api_key], query, \
        offset, self.lang)
Example #18
Source File: chemspider.py From bioservices with GNU General Public License v3.0 | 5 votes |
def find(self, query):
    """return the first 100 compounds that match the query"""
    # SimpleSearch needs the query percent-encoded plus the account token.
    this = "Search.asmx/SimpleSearch?query=%s&token=%s" % (quote(query), self._token)
    res = self.http_get(this, frmt="xml")
    res = self.easyXML(res)
    # The XML response lists compound ids as <int> elements.
    Ids = [int(x.text) for x in res.findAll("int")]
    return Ids
Example #19
Source File: netease.py From listen1 with MIT License | 5 votes |
def _top_playlists(category=u'全部', order='hot', offset=0, limit=60):
    """Fetch a page of Netease top playlists for a category and return the
    list of playlist dicts from the API response."""
    # The API expects the category UTF-8 encoded then percent-escaped.
    category = urllib2.quote(category.encode("utf8"))
    # 'total' is only requested on the first page (offset == 0 -> 'false'
    # is sent when offset is falsy... note the inverted-looking ternary:
    # truthy offset sends 'true').
    action = 'http://music.163.com/api/playlist/list?cat=' + category + \
        '&order=' + order + '&offset=' + str(offset) + \
        '&total=' + ('true' if offset else 'false') + '&limit=' + str(limit)
    data = json.loads(_ne_h(action))
    return data['playlists']
Example #20
Source File: xiami.py From listen1 with MIT License | 5 votes |
def search_track(keyword):
    ''' return matched Xiami songs '''
    # The endpoint expects the keyword UTF-8 encoded then percent-escaped.
    keyword = urllib2.quote(keyword.encode("utf8"))
    search_url = 'http://api.xiami.com/web?v=2.0&app_key=1&key=' + keyword \
        + '&page=1&limit=50&_ksTS=1459930568781_153&callback=jsonp154' + \
        '&r=search/songs'
    response = _xm_h(search_url)
    # Response is JSONP: strip the 'jsonp154(' prefix and ')' suffix.
    json_string = response[len('jsonp154('):-len(')')]
    data = json.loads(json_string)
    result = []
    for song in data['data']["songs"]:
        result.append(_convert_song(song))
    return result
Example #21
Source File: models.py From srvup-rest-framework with Apache License 2.0 | 5 votes |
def get_share_message(self):
    """Return the share message plus this object's absolute URL,
    percent-encoded for embedding in a share-link query string."""
    full_url = "%s%s" %(settings.FULL_DOMAIN_NAME, self.get_absolute_url())
    return urllib2.quote("%s %s" %(self.share_message, full_url))
Example #22
Source File: recipe-576691.py From code with MIT License | 5 votes |
def DistroRank(nix):
    """Return the Yahoo search hit count for a Linux distribution name,
    scraped from the results page banner (0 when no count is found)."""
    # Search for '"<distro>" "linux distribution"', URL-encoded.
    enc = "http://search.yahoo.com/search?p="+urllib2.quote('"'+nix+'" "linux distribution"')
    req = urllib2.Request(enc)
    # Spoof a browser user agent so Yahoo serves the normal results page.
    req.add_header('User-Agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8) Gecko/20051111 Firefox/1.5 BAVM/1.0.0')
    f = urllib2.urlopen(req)
    t = f.read()
    f.close()
    # Scrape the total from the "1 - 10 of N for ..." banner and strip the
    # thousands separators before converting.
    rc = re.compile('<span id="infotext">1 - 10 of (.*) for <strong>')
    rez = rc.search(t)
    if rez:
        return int(rez.groups()[0].replace(',',''))
    else:
        return 0
Example #23
Source File: b2backend.py From duplicity_b2 with MIT License | 5 votes |
def get_or_post(self, url, data, headers=None, data_file=None):
    """
    Sends the request, either get or post.
    If data and data_file are None, send a get request.
    data_file takes precedence over data.
    If headers are not supplied, just send with an auth key
    """
    if headers is None:
        # Lazily (re)authorize so a fresh token is fetched on first use
        # and after a 401 cleared it below.
        if self.auth_token is None:
            self._authorize()
        headers = {'Authorization': self.auth_token}
    if data_file is not None:
        data = data_file
    else:
        # JSON-encode dict payloads; None/empty stays None -> GET request.
        data = json.dumps(data) if data else None
    # Header values must be URL-safe bytes for the B2 API.
    encoded_headers = dict(
        (k, urllib2.quote(v.encode('utf-8')))
        for (k, v) in headers.iteritems()
    )
    try:
        with OpenUrl(url, data, encoded_headers) as resp:
            out = resp.read()
            try:
                return json.loads(out)
            except ValueError:
                # Not JSON (e.g. raw file content): return the bytes as-is.
                return out
    except urllib2.HTTPError as e:
        if e.code == 401:
            # Token expired: drop it so the next call re-authorizes.
            self.auth_token = None
            log.Warn("Authtoken expired, will reauthenticate with next attempt")
        raise e
Example #24
Source File: metrics.py From indico-plugins with MIT License | 5 votes |
def call(self, download_url, **query_params):
    """Query Piwik's Actions.getDownload report for one download URL,
    which must be percent-encoded before being sent."""
    return super(PiwikQueryReportEventMetricDownloads, self).call(method='Actions.getDownload',
                                                                  downloadUrl=quote(download_url),
                                                                  **query_params)
Example #25
Source File: cob.py From Implementing-DevOps-on-AWS with MIT License | 5 votes |
def fetch_headers(self, url, path):
    """Build and SigV4-sign an S3 GET request for `path` under `url`,
    returning the signed request headers.

    :param url: base S3 endpoint URL
    :param path: object path, percent-encoded before joining
    :return: headers dict of the signed request
    """
    # Bug fix: removed the dead local `headers = {}` — it was never used;
    # the signed request.headers is what gets returned.
    # "\n" in the url, required by AWS S3 Auth v4
    url = urlparse.urljoin(url, urllib2.quote(path)) + "\n"
    credentials = Credentials(self.access_key, self.secret_key, self.token)
    request = HTTPRequest("GET", url)
    signer = S3SigV4Auth(credentials, "s3", self.region, self.conduit)
    # add_auth mutates `request`, attaching the Authorization header.
    signer.add_auth(request)
    return request.headers
Example #26
Source File: cob.py From Implementing-DevOps-on-AWS with MIT License | 5 votes |
def fetch_headers(self, url, path): headers = {} # "\n" in the url, required by AWS S3 Auth v4 url = urlparse.urljoin(url, urllib2.quote(path)) + "\n" credentials = Credentials(self.access_key, self.secret_key, self.token) request = HTTPRequest("GET", url) signer = S3SigV4Auth(credentials, "s3", self.region, self.conduit) signer.add_auth(request) return request.headers
Example #27
Source File: cob.py From Implementing-DevOps-on-AWS with MIT License | 5 votes |
def fetch_headers(self, url, path): headers = {} # "\n" in the url, required by AWS S3 Auth v4 url = urlparse.urljoin(url, urllib2.quote(path)) + "\n" credentials = Credentials(self.access_key, self.secret_key, self.token) request = HTTPRequest("GET", url) signer = S3SigV4Auth(credentials, "s3", self.region, self.conduit) signer.add_auth(request) return request.headers
Example #28
Source File: base.py From FastWordQuery with GNU General Public License v3.0 | 5 votes |
def quote_word(self):
    """Return self.word percent-encoded for use in a request URL."""
    encoded = urllib2.quote(self.word)
    return encoded
Example #29
Source File: addon.py From plugin.video.xunleicloud with GNU General Public License v2.0 | 5 votes |
def btdigg(url, mname=''):
    ''' magnet link search engine: btdigg.org '''
    if url == 'search':
        # Fresh search: prompt for a keyword when none was passed in.
        url = 'http://btdigg.org/search?q='
        if not mname:
            kb = Keyboard('', u'请输入搜索关键字')
            kb.doModal()
            if not kb.isConfirmed():
                return
            mname = kb.getText()
            if not mname:
                return
        url = url + urllib2.quote(mname)
    else:
        # Relative pager link from a previous results page.
        url = 'http://btdigg.org' + url
    rsp = hc.urlopen(url)
    # Each result row: title, magnet href, and size text.
    rpat = re.compile(
        r'"idx">.*?>([^<]+)</a>.*?href="(magnet.*?)".*?Size:.*?">(.*?)<')
    items = rpat.findall(rsp)
    menus = [
        {'label': '%d.%s[%s]' % (s+1, v[0], v[2].replace(' ', '')),
         'path': plugin.url_for('playlxmagnet', magnet=v[1])}
        for s, v in enumerate(items)]
    # Pager: optional prev link, "current/total" text, optional next link.
    ppat = re.compile(r'%s%s' % (
        'class="pager".*?(?:href="(/search.*?)")?>←.*?>',
        '(\d+/\d+).*?(?:href="(/search.*?)")?>Next'))
    pgs = ppat.findall(rsp)
    # Append prev/next menu entries only when the corresponding link exists.
    for s, p in enumerate((pgs[0][0], pgs[0][2])):
        if p:
            menus.append({'label': '上一页' if not s else '下一页',
                          'path': plugin.url_for('btdigg', url=p)})
    menus.insert(0, {'label': '[当前页%s页总共]返回上级目录' % pgs[0][1],
                     'path': plugin.url_for('index')})
    return menus
Example #30
Source File: utils.py From google-translate with The Unlicense | 5 votes |
def quote_unicode(text, encoding="utf-8"):
    """urllib2.quote wrapper to handle unicode items.

    Unicode input is first encoded to *encoding* bytes; the quoted result
    is decoded back so the caller always receives unicode.
    """
    raw = text.encode(encoding) if isinstance(text, unicode) else text
    return urllib2.quote(raw).decode(encoding)