Python cookielib.FileCookieJar() Examples

The following are six code examples of cookielib.FileCookieJar(). You can go to the original project or source file by following the links above each example, or check out all other available functions and classes of the cookielib module. Note that cookielib is the Python 2 name for this module; in Python 3 it was renamed to http.cookiejar.
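FileCookieJar is the abstract base class for cookie jars that can be written to and read from a file. It does not implement save() itself (calling it raises NotImplementedError), so for actual persistence you normally use one of its concrete subclasses, MozillaCookieJar or LWPCookieJar. A minimal sketch of the round trip, assuming a writable cookies.txt in the working directory:

import cookielib
import urllib2

# MozillaCookieJar is a concrete FileCookieJar subclass that reads and
# writes the Netscape/Mozilla cookies.txt format.
jar = cookielib.MozillaCookieJar('cookies.txt')

# Send the jar along with every request made through this opener.
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
opener.open('http://example.com/')

# Persist whatever cookies the server set; ignore_discard keeps
# session cookies that would otherwise be dropped.
jar.save(ignore_discard=True)

# Later, a fresh jar can be reloaded from the same file.
jar2 = cookielib.MozillaCookieJar('cookies.txt')
jar2.load(ignore_discard=True)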
Example #1
Source File: sumologic.py    From sumologic-python-sdk with Apache License 2.0
def __init__(self, accessId, accessKey, endpoint=None, caBundle=None, cookieFile='cookies.txt'):
    self.session = requests.Session()
    self.session.auth = (accessId, accessKey)
    self.DEFAULT_VERSION = 'v1'
    self.session.headers = {'content-type': 'application/json', 'accept': 'application/json'}
    if caBundle is not None:
        self.session.verify = caBundle
    cj = cookielib.FileCookieJar(cookieFile)
    self.session.cookies = cj
    if endpoint is None:
        self.endpoint = self._get_endpoint()
    else:
        self.endpoint = endpoint
    if self.endpoint[-1:] == "/":
        raise Exception("Endpoint should not end with a slash character")
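One caveat with this example: FileCookieJar.save() is unimplemented on the base class, so the session above keeps cookies in memory but can never actually write them to cookies.txt. A sketch of a persisting variant, assuming a concrete subclass is acceptable and that the cookie file may or may not exist yet:

import os
import cookielib
import requests

COOKIE_FILE = 'cookies.txt'

session = requests.Session()
# LWPCookieJar is a concrete FileCookieJar subclass, so save() and
# load() actually work (MozillaCookieJar would do as well).
cj = cookielib.LWPCookieJar(COOKIE_FILE)
if os.path.isfile(COOKIE_FILE):
    cj.load(ignore_discard=True)  # reuse cookies from a previous run
session.cookies = cj

# ... make requests with the session, then persist the cookies:
cj.save(ignore_discard=True)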
Example #2
Source File: dorkScan.py    From d4rkc0de with GNU General Public License v2.0
def cxeSearch(go_inurl, go_site, go_cxe, go_ftype, maxc):
    uRLS = []
    counter = 0
    while counter < int(maxc):
        # Fresh cookie jar for every page of results.
        jar = cookielib.FileCookieJar("cookies")
        query = 'q=' + go_inurl + '+' + go_site + '+' + go_ftype
        results_web = ('http://www.google.com/cse?' + go_cxe + '&' + query
                       + '&num=' + str(gnum) + '&hl=en&lr=&ie=UTF-8&start='
                       + repr(counter) + '&sa=N')
        request_web = urllib2.Request(results_web)
        agent = random.choice(header)
        request_web.add_header('User-Agent', agent)
        opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
        text = opener_web.open(request_web).read()
        strreg = re.compile('(?<=href=")(.*?)(?=")')
        names = strreg.findall(text)
        counter += 100
        for name in names:
            if name not in uRLS:
                if re.search(r'\(', name) or re.search("<", name) or re.search(r"\A/", name) or re.search(r"\A(http://)\d", name):
                    pass
                elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("%", name):
                    pass
                else:
                    uRLS.append(name)
    tmpList = []
    finalList = []
    print "[+] URLS (unsorted) :", len(uRLS)
    for entry in uRLS:
        try:
            t2host = entry.split("/", 3)
            domain = t2host[2]
            # Keep one parameterized URL per domain.
            if domain not in tmpList and "=" in entry:
                finalList.append(entry)
                tmpList.append(domain)
        except IndexError:
            # Entries without a host part are skipped.
            pass
    print "[+] URLS (sorted)   :", len(finalList)
    return finalList
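The function relies on two module-level names defined elsewhere in dorkScan.py: header, a list of User-Agent strings, and gnum, the page size requested from Google. A hypothetical invocation, with those globals stubbed in (the cx value below is made up for illustration):

# Hypothetical setup; in dorkScan.py these globals come from the
# script's configuration section.
header = ['Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0']
gnum = 100

urls = cxeSearch('inurl:index.php', 'site:example.com',
                 'cx=partner-pub-1234567890:abcdefg', 'filetype:php', 100)
print "[+] hosts returned :", len(urls)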
Example #3
Source File: simpleDorkGUi.py    From d4rkc0de with GNU General Public License v2.0
def gHarv(dork, site, dP, cxe, output, gnum, maxcount):
    global GoogleURLS, tmplist, gcount
    counter = 0
    gcount += 1
    GoogleURLS = []
    try:
        CXr = CXdic[cxe]
        # Must be a list: random.choice() on a bare string would pick
        # a single character as the User-Agent.
        header = ['Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6)']
        saveCount = len(GoogleURLS)
        cmpslptime = 0
        lastlen = 0
        while counter < int(maxcount):
            jar = cookielib.FileCookieJar("cookies")
            query = dP + dork + '+site:' + site
            gnum = int(gnum)
            results_web = ('http://www.google.com/cse?cx=' + CXr + '&q=' + query
                           + '&num=' + repr(gnum) + '&hl=en&lr=&ie=UTF-8&start='
                           + repr(counter) + '&sa=N')
            request_web = urllib2.Request(results_web)
            agent = random.choice(header)
            request_web.add_header('User-Agent', agent)
            opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
            text = opener_web.open(request_web).read()
            strreg = re.compile('(?<=href=")(.*?)(?=")')
            names = strreg.findall(text)
            for name in names:
                if name not in GoogleURLS:
                    if re.search(r'\(', name) or re.search("<", name) or re.search(r"\A/", name) or re.search(r"\A(http://)\d", name):
                        pass
                    elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name) or re.search("blackle", name):
                        pass
                    else:
                        if output == 1:
                            txtField.insert(END, name + '\n')
                        GoogleURLS.append(name)
            # Randomized delay between queries to avoid throttling.
            sleeptimer = random.choice(rSA)
            time.sleep(sleeptimer)
            cmpslptime += sleeptimer
            counter += int(gnum)
            percent = int((1.0 * counter / int(maxcount)) * 100)
            laststatstring = ('Current MaxCount : ' + repr(counter)
                              + ' | Last Query Sleeptimer (' + repr(sleeptimer)
                              + ') | Percent Done : ' + repr(percent))
            statList.append(laststatstring)
            modStatus()
        TestHost_bttn.configure(state=NORMAL, fg=fgCol)
        if iC:
            for entry in GoogleURLS:
                if '=' in entry:
                    tmplist.append(entry)
        # Collapse URLs that differ only in parameter values.
        for url in GoogleURLS:
            try:
                part = url.split('?')
                var = part[1].split('&')
                cod = ""
                for x in var:
                    strX = x.split("=")
                    cod += strX[0]
                    parmURL = part[0] + cod
                    if parmURL not in ParmURLS_List and url not in tmplist:
                        ParmURLS_List.append(parmURL)
                        tmplist.append(url)
            except IndexError:
                pass
        tmplist.sort()
        txtField.insert(END, '\nFound URLS: ' + repr(len(GoogleURLS))
                        + '\t\tTotal Parm-dupe Checked URLS: ' + repr(len(tmplist)))
        txtField.insert(END, '\nGoogle Search Finished...\n')
    except IOError:
        pass
Example #4
Source File: api.py    From 115wangpan with BSD 2-Clause "Simplified" License
def __init__(self, persistent=False,
             cookies_filename=None, cookies_type='LWPCookieJar'):
    """
    :param bool auto_logout: whether to logout automatically when
        :class:`.API` object is destroyed

                             .. deprecated:: 0.6.0
                                 Call :meth:`.API.logout` explicitly

    :param bool persistent: whether to use persistent session that stores
        cookies on disk
    :param str cookies_filename: path to the cookies file, use default
        path (`~/.115cookies`) if None
    :param str cookies_type: a string representing
        :class:`cookielib.FileCookieJar` subclass,
        `LWPCookieJar` (default) or `MozillaCookieJar`
    """
    self.persistent = persistent
    self.cookies_filename = cookies_filename
    self.cookies_type = cookies_type
    self.passport = None
    self.http = RequestHandler()
    self.logger = logging.getLogger(conf.LOGGING_API_LOGGER)
    # Cache attributes to decrease API hits
    self._user_id = None
    self._username = None
    self._signatures = {}
    self._upload_url = None
    self._lixian_timestamp = None
    self._root_directory = None
    self._downloads_directory = None
    self._receiver_directory = None
    self._torrents_directory = None
    self._task_count = None
    self._task_quota = None
    if self.persistent:
        self.load_cookies()
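The cookies_type string names a cookielib.FileCookieJar subclass, which suggests the jar class is resolved by name at load time. The project's actual load_cookies is not shown here; a minimal sketch of that resolution, assuming only the attributes set in the constructor above, could look like:

import os
import cookielib

def load_cookies(self):
    """Hypothetical: look up the jar class by name, load the file if present."""
    # getattr turns 'LWPCookieJar' / 'MozillaCookieJar' into the class itself.
    jar_class = getattr(cookielib, self.cookies_type)
    cookies_filename = self.cookies_filename or os.path.expanduser('~/.115cookies')
    self.cookies = jar_class(cookies_filename)
    if os.path.exists(cookies_filename):
        self.cookies.load(ignore_discard=True, ignore_expires=True)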
Example #5
Source File: api.py    From 115wangpan with BSD 2-Clause "Simplified" License
def save_cookies(self, ignore_discard=True, ignore_expires=True):
    """Save cookies to the file :attr:`.API.cookies_filename`"""
    if not isinstance(self.cookies, cookielib.FileCookieJar):
        m = 'Cookies must be a cookielib.FileCookieJar object to be saved.'
        raise APIError(m)
    self.cookies.save(ignore_discard=ignore_discard,
                      ignore_expires=ignore_expires)
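The isinstance check guards against the session holding a plain CookieJar, which has no usable save(). A hypothetical round trip with the API class from Example #4 might look like:

# Hypothetical usage; API and APIError come from the 115wangpan package.
api = API(persistent=True, cookies_type='MozillaCookieJar')
# ... authenticate and make requests ...
try:
    api.save_cookies()
except APIError as e:
    print e  # the cookies attribute was not a FileCookieJar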
Example #6
Source File: ACOi.py    From d4rkc0de with GNU General Public License v2.0
def gharv(magicWord):
    vUniq = []
    for site in sitearray:
        counter = 0
        bcksp = 0
        try:
            # Pick a random custom search engine from CXdic.
            CXname = random.choice(CXdic.keys())
            CXr = CXdic[CXname]
            print "\n| Site : ", site, " | CSEngine : ", CXname + " | Progress : ",
            saveCount = len(targets)
            cmpslptime = 0
            lastlen = 0
            while counter < maxcount:
                jar = cookielib.FileCookieJar("cookies")
                query = magicWord + '+' + dork + '+site:' + site
                results_web = ('http://www.google.com/cse?cx=' + CXr + '&q=' + query
                               + '&num=' + str(gnum) + '&hl=en&lr=&ie=UTF-8&start='
                               + repr(counter) + '&sa=N')
                request_web = urllib2.Request(results_web)
                agent = random.choice(header)
                request_web.add_header('User-Agent', agent)
                opener_web = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
                text = opener_web.open(request_web).read()
                strreg = re.compile('(?<=href=")(.*?)(?=")')
                names = strreg.findall(text)
                for name in names:
                    if name not in targets:
                        if re.search(r'\(', name) or re.search("<", name) or re.search(r"\A/", name) or re.search(r"\A(http://)\d", name):
                            pass
                        elif re.search("google", name) or re.search("youtube", name) or re.search(".gov", name):
                            pass
                        else:
                            targets.append(name)
                sleeptimer = random.choice(rSA)
                time.sleep(sleeptimer)
                cmpslptime += sleeptimer
                counter += gnum
                percent = int((1.0 * counter / maxcount) * 100)
                if bcksp == 1:
                    # Erase the previous status string with backspaces.
                    stroutlen = 0
                    while stroutlen < lastlen:
                        sys.stdout.write("\10")
                        stroutlen += 1
                sys.stdout.write("%s(%s) - %s percent" % (counter, sleeptimer, percent))
                lastlen = len(str(counter) + str(sleeptimer) + str(percent)) + 13
                sys.stdout.flush()
                bcksp = 1
            sys.stdout.write(" | %s Strings received, in %s seconds" % (len(targets) - saveCount, cmpslptime))
        except IOError:
            sys.stdout.write(" | %s Strings received" % (len(targets) - saveCount))
    firstparm = ''
    uList = []
    # Classify harvested URLs by domain and first query parameter.
    for entry in targets:
        thost = entry.rsplit("=")
        t2host = entry.rsplit("/")
        try:
            firstparm = thost[1]
            domain = t2host[2]
            if domain not in uList:
                if '.' + dorkEXT + '?' in entry and firstparm.isdigit():
                    uniqvictims.append(entry)
                    uList.append(domain)
                elif 'http://' in entry and 'index.' in entry and firstparm.isalpha():
                    spidervictims.append(entry)
                    uList.append(domain)
                else:
                    miscVic.append(entry)
        except IndexError:
            pass
# ScanQueue Builder
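All of the dork-scanner examples above share the same pattern: a throwaway FileCookieJar is wired into a urllib2 opener via HTTPCookieProcessor so that cookies are accepted and replayed for the duration of one request cycle. Because the base class cannot save, nothing is ever written to the "cookies" file; the jar is effectively in-memory. The pattern, reduced to a minimal sketch:

import cookielib
import urllib2

# The jar accepts and replays cookies within this opener's lifetime.
# FileCookieJar("cookies") records the filename but never writes to it,
# since save() is unimplemented on the base class.
jar = cookielib.FileCookieJar("cookies")
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
request = urllib2.Request('http://www.google.com/')
request.add_header('User-Agent', 'Mozilla/5.0')
html = opener.open(request).read()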