Python google.search() Examples

The following are code examples showing how to use google.search(), provided by the third-party google package. They are taken from open source Python projects.
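
For orientation, here is a minimal sketch of the typical call, assuming the third-party google package (installable with pip install google), whose search() function yields result URLs as a generator; the query string and the tld, num, stop and pause values below are purely illustrative.

from google import search

# iterate over the result URLs for an illustrative query
for url in search('python web scraping tutorial', tld='com', num=10, stop=10, pause=2.0):
    print(url)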

Example 1
Project: dl4ir-webnav   Author: nyu-dl   File: google_search.py    BSD 3-Clause "New" or "Revised" License
def get_candidates(qatp):

    wk = wiki.Wiki(prm.pages_path)
    titles_pos = wk.get_titles_pos()

    candidates = []
    n = 0
    for q,a,t,p in qatp:
        if n % 100 == 0:
            print 'finding candidates sample', n
        n+=1

        c = []

        for page in google.search(q.lower() + ' site:wikipedia.org', num=prm.max_candidates,stop=prm.max_candidates, pause=45):
            title = page.replace('https://en.wikipedia.org/wiki/','').replace('_',' ').lower()
            if title in titles_pos:
                c.append(titles_pos[title])

        candidates.append(c)
        
    return candidates 
Example 2
Project: customized-keyword-web-crawler   Author: zhucebuliaolongchuan   File: web_crawler.py    MIT License
def get_start_pages(self):
        """
        Get the initial 10 start pages using google api
        :return: void.
        """
        print "Getting the initial pages"
        for url in search(self.query, tld="com", num=self.n, start=0, stop=1, pause=2):
            if url not in self.url_obj:
                self.url_obj[url] = Urls(url)
                self.queue.append(url)
        # Set up the PageRank value for the start pages
        page_rank_value = 1.0 / len(self.url_obj)
        for key in self.url_obj:
            self.url_obj[key].set_page_rank(page_rank_value)
            print key
        print "\n" 
Example 3
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def PageTitleFilterPageGenerator(generator, ignore_list):
    """
    Yield only those pages that are not listed in the ignore list.

    @param ignore_list: family names are mapped to dictionaries in which
        language codes are mapped to lists of page titles. Each title must
        be a valid regex as they are compared using L{re.search}.
    @type ignore_list: dict

    """
    def is_ignored(page):
        if page.site.code in ignore_list.get(page.site.family.name, {}):
            for ig in ignore_list[page.site.family.name][page.site.code]:
                if re.search(ig, page.title()):
                    return True
        return False

    for page in generator:
        if is_ignored(page):
            if config.verbose_output:
                pywikibot.output('Ignoring page %s' % page.title())
        else:
            yield page 
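
For reference, the ignore_list structure described in the docstring above might look like the sketch below; the family name, language code and title regexes are hypothetical values, not taken from the project.

ignore_list = {
    'wikipedia': {                          # family name
        'en': [r'^User:', r'Sandbox'],      # language code -> list of title regexes
    },
}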
Example 4
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def SearchPageGenerator(query, step=None, total=None, namespaces=None,
                        site=None):
    """
    Yield pages from the MediaWiki internal search engine.

    @param step: Maximum number of pages to retrieve per API query
    @type step: int
    @param total: Maximum number of pages to retrieve in total
    @type total: int
    @param site: Site for generator results.
    @type site: L{pywikibot.site.BaseSite}
    """
    if site is None:
        site = pywikibot.Site()
    for page in site.search(query, step=step, total=total,
                            namespaces=namespaces):
        yield page 
Example 5
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def __init__(self, query=None, total=100, site=None):
        """
        Constructor.

        @param site: Site for generator results.
        @type site: L{pywikibot.site.BaseSite}
        """
        raise RuntimeError(
            'pagegenerator YahooSearchPageGenerator is not functional.\n'
            'See https://phabricator.wikimedia.org/T106085')

        self.query = query or pywikibot.input(u'Please enter the search query:')
        self.total = total
        if site is None:
            site = pywikibot.Site()
        self.site = site 
Example 6
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def queryYahoo(self, query):
        """Perform a query using python package 'pYsearch'."""
        try:
            from yahoo.search.web import WebSearch
        except ImportError:
            pywikibot.error("ERROR: generator YahooSearchPageGenerator "
                            "depends on package 'pYsearch'.\n"
                            "To install, please run: pip install pYsearch")
            exit(1)

        srch = WebSearch(config.yahoo_appid, query=query, results=self.total)
        dom = srch.get_results()
        results = srch.parse_results(dom)
        for res in results:
            url = res.Url
            yield url 
Example 7
Project: EvilTool   Author: mthbernardes   File: EvilTool.py    GNU General Public License v2.0
def arguments():
    parser = argparse.ArgumentParser(description = banner())
    parser.add_argument('-m', '--mode', action = 'store', dest = 'mode',required = True, help = 'Mode of search, use google, censys or single')
    parser.add_argument('-d', '--dork', action = 'store', dest = 'dork', default='filetype:cgi', required = False, help = 'Set the google dork.')
    parser.add_argument('-p', '--proxy', action = 'store', dest = 'proxy',required = False, help = 'Set proxy Server to Google search, or use auto to generate a list of proxies')
    parser.add_argument('-u', '--url', action = 'store', dest = 'url', required = False, help= 'Set URL to test ShellShock Vulnerability')
    args = parser.parse_args()
    if args.mode.lower() == 'google':
        dork = args.dork
        proxy = args.proxy
        search_google(dork,proxy)
    elif args.mode.lower() == 'censys':
        search(API_URL,UID,SECRET,page,pages)
    elif args.mode.lower() == 'single':
        url = args.url
        if url is None:
            parser.print_help()
        else:
            single(url)
    else:
        parser.print_help() 
Example 8
Project: EvilTool   Author: mthbernardes   File: EvilTool.py    GNU General Public License v2.0
def search_google(dork,proxy):
    from google import search
    if proxy is None:
        for url in search(dork,lang='pt-br',tld='com.br',pause=random.uniform(2.3, 4.7)):
            test_conn(url)
        print

    elif proxy == 'auto':
        gen_proxy(dork)

    else:
        try:
            for url in search(dork,ip=proxy,conn_type='http',lang='pt-br',tld='com.br',pause=random.uniform(1.3, 4.7)):
                test_conn(url)
        except Exception,e:
            cprint('[!] - Error whiling using proxy '+proxy,'green','on_red')
            print str(e)
        print 
Example 9
Project: Luna   Author: Moonlington   File: Searches.py    Apache License 2.0
def youtube(self, ctx, *, ytsearch: str):
        """Does a little YouTube search."""
        opener = request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        search = ytsearch.split()
        search = "+".join(search)
        errorthing = ytsearch
        url = ('https://www.youtube.com/results?search_query={}'.format(search))
        ourUrl = opener.open(url).read()
        await self.bot.type()
        soup = bs(ourUrl, "html.parser")
        alexpls = re.findall('"(/watch\?v=.*?)"',
                             str(soup.find_all('a',
                                               attrs={'href': re.compile('^/watch\?v=.*')})))
        try:
            await self.bot.say('{}: https://www.youtube.com{}'.format(ctx.message.author.mention, alexpls[0]))
        except IndexError:
            await self.bot.say('Sorry I could not find any results containing the name `{}`'.format(errorthing)) 
Example 10
Project: AnonSQLiScanner   Author: Anon6372098   File: pysearch.py    Apache License 2.0
def dork(self, query, pages, filename):
        """ search the given dork from google and
            return the search result """
        print "[+] Googling untuk %s " % query
        url_list = []

        try:
            for url in search(query, stop=pages):
                url_list.append(url)
        except HTTPError:
            print "[HTTP Error 503] Servis Tak Bisa Dijangkau"
            print "Coba dork lain, saya sarankan menggunakan VPN"
            exit(1)

        if len(url_list) != 0:
            print "Hasilnya : %i" % len(url_list)
            output = file(filename, "w")
            for url in url_list:
                output.write(url + "\n")
            output.close()
        else:
            print "Tak ada hasil yang ditemukan"
            exit() 
Example 11
Project: PlayThatSong   Author: akohli96   File: tswift_modified.py    Apache License 2.0
def find_song(lyrics):
        for url in search('song lyrics ' + lyrics + "metrolyrics", stop=20):
            if re.match(SONG_RE, url):
                return Song(url=url)
        return None 
Example 12
Project: raspberrypi_AI-NO-GUI   Author: Comm4nd0   File: search.py    MIT License
def google(query):
    # Not used yet, but I might make it a fallback if wiki or wolf doesn't return any results
    for url in search(query, stop=5):
        add = url
    html = urllib.request.urlopen(add).read()
    soup = BeautifulSoup(html, "html5lib")
    [s.extract() for s in soup(['style', 'script', '[document]', 'head', 'title'])]
    visible_text = soup.getText()

    print (visible_text)
    return visible_text 
Example 13
Project: raspberrypi_AI-NO-GUI   Author: Comm4nd0   File: search.py    MIT License
def wiki(query):
    #wiki search and return top 3 sentences
    para = wikipedia.summary(query, sentences=3)
    return para 
Example 14
Project: raspberrypi_AI-NO-GUI   Author: Comm4nd0   File: search.py    MIT License
def wolf(query):
    #wolfram search and return all text
    wolfAPIkey = config['wolfram']['apikey']
    client = wolframalpha.Client(wolfAPIkey)
    res = client.query(query)

    a = len(res.pods)
    resStr = ""
    for num in range(a):
        print(res.pods[num].text)
        resStr += res.pods[num].text
    return resStr 
Example 15
Project: Email-Search   Author: TaconeoMental   File: EmailSearch.py    Apache License 2.0
def email_search(dominio, maxN):
    emails_total = []
    regex_email = re.compile(r'\b[\w.-]+?@\w+?\.\w+?\b')

# If maxN is 0 (the default), we do not know the maximum index we will reach, so a question mark is printed instead
    max_indice = None
    if maxN == 0:
        max_indice = "?"
    else:
        max_indice = maxN

    try:
        query = "site:{}".format(dominio)
        res_busqueda = search(query, stop=maxN)
        for ind, url in enumerate(res_busqueda):
            headers = {"User-Agent": user_agent()}
            req = requests.get(url, headers=headers)
            emails = re.findall(regex_email, req.text)
            for email in emails:
                if email not in emails_total:
                    emails_total.append(email)

            sys.stdout.write("\r{bold}{amarillo}{0}/{1}{end} {bold}páginas analizadas | {bold}{amarillo}{2}{end} {bold}emails encontrados{end}".format(ind + 1, max_indice, len(emails_total), amarillo=colores.AMARILLO, end=colores.ENDC, bold=colores.BOLD))
            sys.stdout.flush()

    except KeyboardInterrupt:
        pass

    finally:
        return emails_total


# Define the main() function where the program will start executing
Example 16
Project: Question-Answering-System   Author: AdityaAS   File: get_10_summary.py    MIT License
def get_10_summary(query, source="google"):
    """
    This function returns the first ten (or fewer, if 10 are not present) summaries when the query (a string) is run on the source (here google).
    The return value is a generator of result URLs and can be iterated over like a list
    """
    
    result = search(query) #calls query on google
    #print "---------------------------" + str(type(results)) + "---------------------------"
    return result 
Example 17
Project: Liljimbo-Chatbot   Author: chrisjim316   File: app.py    MIT License
def __init__(self, symptoms):
		"""
		self.symptoms is a set of all symptoms
		self.urls is a list of all urls relating to those symptoms
		self.website_content is the website content of the first url in self.urls
		self.disease_name is pretty much the entire disease entry; it was misnamed "disease_name"
		"""
		self.symptoms = symptoms
		self.urls = list(self.search(self.symptoms))
		self.website_content = self.parse_websites(self.urls)
		#self.disease_name = self.get_info(self.website_content)
		# self.disease name
		# self.disease_symptoms
		# self.description 
Example 18
Project: Liljimbo-Chatbot   Author: chrisjim316   File: app.py    MIT License
def search(self, symptoms):
		# given a list of symptoms, google the symptoms and return top 3 results
		from google import search
		symptoms = ' '.join(symptoms)
		symptoms_search = symptoms + " \"nhs\""
		logging.debug(symptoms_search)
		return (search(symptoms_search, stop=3)) 
Example 19
Project: Liljimbo-Chatbot   Author: chrisjim316   File: app.py    MIT License
def receive_message(message):
	logging.debug(message)
	symptoms = parse_message(message)
	search(symptoms) 
Example 20
Project: customer-service-chatbot   Author: xploiter-projects   File: bot.py    Apache License 2.0
def Importance():
   filtered_words = [word for word in words if word not in stopwords.words('english')]
   wordfreq = []
   for w in filtered_words:
       wordfreq.append(filtered_words.count(w))
   Frequency= str(zip(filtered_words, wordfreq))
   Free=list(set([Frequency]))
   count = Counter(filtered_words)
   return count

#function to search google if the user wants to get links from google
Example 21
Project: customer-service-chatbot   Author: xploiter-projects   File: bot.py    Apache License 2.0
def SearchGoogle():
      
   UrlCount=0

   for url in search(message, stop=1):
       print(url)
       UrlCount +=1
       if UrlCount == 3:
          break
#function used by the SameCheck function to return keys for the dictionary
#we built, so the bot can use keywords stored from previous questions to get the results.
Example 22
Project: laurence-bot   Author: c4software   File: google.py    Apache License 2.0
def cmd_do_googlesearch(msg):
    try:
        if msg["query"]:
            for url in search(msg["query"], tld='fr', lang='fr', num=1, stop=1):
                return "Voici le premier résultat \n {0}".format(url)
        else:
            mark_for_awaiting_response(username_or_channel(msg), "google")
            return "Oui ? Que recherchez vous ?"
    except Exception as e:  # pragma: no cover
        return "Recherche impossible." 
Example 23
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def __filter_match(cls, regex, string, quantifier):
        """Return True if string matches precompiled regex list.

        @param quantifier: a quantifier
        @type quantifier: str of 'all', 'any' or 'none'
        @rtype: bool
        """
        if quantifier == 'all':
            match = all(r.search(string) for r in regex)
        else:
            match = any(r.search(string) for r in regex)
        return (quantifier == 'none') ^ match 
Example 24
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def queryGoogle(self, query):
        """
        Perform a query using python package 'google'.

        The terms of service as at June 2014 give two conditions that
        may apply to use of search:

            1. Don't access [Google Services] using a method other than
               the interface and the instructions that [they] provide.
            2. Don't remove, obscure, or alter any legal notices
               displayed in or along with [Google] Services.

        Both of those issues should be managed by the package 'google',
        however Pywikibot will at least ensure the user sees the TOS
        in order to comply with the second condition.
        """
        try:
            import google
        except ImportError:
            pywikibot.error("ERROR: generator GoogleSearchPageGenerator "
                            "depends on package 'google'.\n"
                            "To install, please run: pip install google.")
            exit(1)
        pywikibot.warning('Please read http://www.google.com/accounts/TOS')
        for url in google.search(query):
            yield url 
Example 25
Project: pywikibot   Author: metakgp   File: pagegenerators.py    MIT License
def WikibaseSearchItemPageGenerator(text, language=None, total=None, site=None):
    """
    Generate pages that contain the provided text.

    @param text: Text to look for.
    @type text: str
    @param language: Code of the language to search in. If not specified,
        value from pywikibot.config.data_lang is used.
    @type language: str
    @param total: Maximum number of pages to retrieve in total, or None in
        case of no limit.
    @type total: int or None
    @param site: Site for generator results.
    @type site: L{pywikibot.site.BaseSite}
    """
    if site is None:
        site = pywikibot.Site()
    if language is None:
        language = site.lang
    repo = site.data_repository()

    data = repo.search_entities(text, language, limit=total)
    pywikibot.output(u'retrieved %d items' % len(list(data)))
    for item in data:
        yield pywikibot.ItemPage(repo, item['id'])


# Deprecated old names available for compatibility with compat. 
Example 26
Project: pyoverflow   Author: qboticslabs   File: pyoverflow.py    GNU General Public License v2.0
def submit_err(error_msg,no_solution):
	print "\n"
	print "Please wait ................ "
	print "Pyoverflow is checking for the top solutions for your code problems" 
	print ":)"
	print "\n" 

	try:
		search_word = "python"+" "+str(error_msg)
		for url in search(search_word, stop=2):
			search_result.append(url)
	except Exception as e:
		print e
		sys.exit(0)
	
	try:	
		if(int(no_solution) > 0):
			for i in range(0,int(no_solution)):
				#print search_result[i]
				print "Opening"+"\t"+str(i)+" solution in browser"
				webbrowser.open_new_tab(search_result[i])
		else:
			print "Number of solutions should be > 0"

	except Exception as e:
		print e
		sys.exit(0) 
Example 27
Project: EvilTool   Author: mthbernardes   File: EvilTool.py    GNU General Public License v2.0
def proxy_auto(ip,port,proxy_type,dork):
    from google import search
    try:
        proxy_url = str("%s:%s") %(ip,port)
        cprint("[+] - Using Proxy "+proxy_url,'blue')
        print
        for url in search(dork,ip=proxy_url,conn_type=proxy_type,lang='pt-br',tld='com.br',pause=random.uniform(1.3, 4.7)):
            test_conn(url)
    except Exception,e:
        cprint('[!] - Error whiling using proxy '+proxy_url,'green','on_red')
        print str(e)
        print 
Example 28
Project: EvilTool   Author: mthbernardes   File: EvilTool.py    GNU General Public License v2.0
def search(API_URL,UID,SECRET,page,pages):
    check_conf()
    while page <= pages:
        query = {'query':'80.http.get.title:/cgi-bin/test.cgi','page':page}
        res = requests.post(API_URL + "/search/ipv4", auth=(UID, SECRET), json=query)
        res_json = res.json()
        if res.status_code == 200:
            build_url(res_json)
        pages = res_json['metadata']['pages']
        page += 1 
Example 29
Project: Luna   Author: Moonlington   File: Searches.py    Apache License 2.0
def _google(self, ctx, *, googlesearch):
        """Googles stuff"""
        query = googlesearch
        await self.bot.type()
        url = google.search(query)
        await self.bot.say(
            '{}, you searched for: **{}**\nThis is the result: {}'.format(ctx.message.author.mention, query, next(url))) 
Example 30
Project: Sploits   Author: iDuronto   File: googd0rker.py    MIT License
def google_it (site,dork,filename):
	clear_cookie()
	out=open(filename,"a")
	for title in search(dork, stop=50):
		print(title)
		out.write(title)
		out.write("\n")
	out.close() 
Example 31
Project: whoUR   Author: jopcode   File: googleScanner.py    GNU General Public License v3.0
def googleScanner():
    dork = raw_input(G+'\n[+] Please enter a dork to use(inurl:"index.php?id=") leave in blank for use a random dork:: '+B)
    if not dork:
        dork = randomDork()
        print B+'\n[!] Using '+G+dork
    try:
        hitLimit = int(raw_input(G+"[+] Please enter a number of hits(10):: "+B))
        if not hitLimit :
            raise ValueError('null')
    except ValueError:
            print lR+"\n[!] Please enter a Number\n"
            hitLimit = int(raw_input(G+"[+] Please enter a number of hits(10):: "+B))

    print P+"\n--------------------"
    print P+"-G O O G L E  S C A N N E R-"
    print P+"--------------------\n"
    print B+"[!] Scann in Process...\n"

    vulSites = []
    
    try:
        
        for url in search(dork, stop=hitLimit, start=0):
            
            print G+"[!] Checking "+B+url+"\n"
            
            try:
                isVul = checkUrl(url)
                if isVul == "true":
                    vulSites.append(url)
            except:
                continue
    
    except HTTPError:
        exit("[503] Service Unreachable")
    
    except URLError:
       exit("[504] Gateway Timeout")
    
    else:
        for vulnerable in vulSites:
            print G+'[!] '+B+vulnerable+G+' Has been vulnerable'
        
        if len(vulSites) == 0:
            print lR+'[!] 0 vulnerable sites has been found'
            return False

        saveSites(vulSites) 
Example 32
Project: AutOSINT   Author: bharshbarger   File: googledork.py    MIT License
def run(self, args, lookup, reportDir):

        self.args = args

        #C58EA28C-18C0-4a97-9AF2-036E93DDAFB3 is string for open OWA attachments, for example
        #init lists

        #iterate the lookup list
        for i, l in enumerate(lookup):
            for d in self.args.dorks:

                #add header to result
                self.google_result.append('[i] Google query for: "%s site:%s"' % (str(d),str(l)))

                #open a file for each domain searched
                googleFile=open(reportDir+l+'/'+l+'_google_dork.txt','w')

                #show user what is being searched
                print ('[+] Google query %s for %s site:%s' % (str(i + 1),str(d),str(l)))
                print('[+] Results:')
                
                try:
                    #iterate url results from search of password(for now) and site:current list value
                    for url in search(str(d)+' site:'+str(l), stop = 20):
                        #append results together
                        self.google_result.append(url)

                        #rate limit with 2 second delay
                        time.sleep(2)
                #catch exceptions
                except Exception as e:
                    print ('[!] Error encountered: %s' % e)
                    pass
        #iterate results
        for r in self.google_result:
            #write results on newlines
            googleFile.writelines(r + '\r\n')

        #verbosity flag
        if self.args.verbose is True:
            for r in self.google_result: print (''.join(r))
                
        #return results list
        return self.google_result 
Example 33
Project: Liljimbo-Chatbot   Author: chrisjim316   File: app.py    MIT License
def parse_websites(self, search):
		logging.debug(search)
		# given an NHS website, parse it for illness name, description, symptoms and cure
		list_of_request_objects = [requests.get(i) for i in search]
		list_of_request_objects = [item for item in list_of_request_objects if item.status_code != 404]
		# if objects are 404 errors, delete them from the list as it'll be useless to us.

		list_of_contents = []

		list_of_contents.append(list_of_request_objects[0].content)

		# gets the contents of 3 requests objects and stores them in a list

		# Beautiful soup section #

		bs_objects = list(map(lambda x: BeautifulSoup(x, 'html.parser'), list_of_contents))
		# makes beautiful soup objects (needed to parse) out of the requests

		# bs_objects = BeautifulSoup(list_of_request_objects[0], 'html.parser')


		# here we begin the distinction between objects. Each article represents a different top-scoring webpage's "article" content.
		article1 = bs_objects[0].find("div", {"class": "article"})
		
		# article2 = bs_objects[1].findAll("div", {"class": "article"})
		# article3 = bs_objects[2].findAll("div", {"class": "article"})


		text = list(article1.get_text())
		"""
		for i in text:
			if "stock" in i.lower():
				text.pop(i)
		"""
		text = "".join(text)

		return(text)
	# def get_title(self, article) 
Example 34
Project: Luna   Author: Moonlington   File: Searches.py    Apache License 2.0
def fanfiction(self, *, fanfucksearch: str):
        """Searches the shittiest and weirdest fanfics on fanfiction.net"""
        search = fanfucksearch.split()
        thing = "+".join(search)
        errorthing = fanfucksearch
        url = 'https://www.fanfiction.net/search.php?ready=1&keywords={}&categoryid=0&genreid1=0&genreid2=0&languageid=1&censorid=4&statusid=2&type=story&match=&sort=&ppage=1&characterid1=0&characterid2=0&characterid3=0&characterid4=0&words=1&formatid=0'.format(
            thing)
        opener = request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        ourUrl = opener.open(url).read()
        soup = bs(ourUrl, "html.parser")
        alexpls = re.findall(
            '"(/s/.*?)"', str(soup.find_all('a', attrs={'href': re.compile('/s/*')})))
        try:
            x = len(alexpls)
            newurl = 'https://www.fanfiction.net' + \
                alexpls[random.randrange(x)]
            ourUrl = opener.open(newurl).read()
            soup = bs(ourUrl, "html.parser")
            alexpls2 = re.findall('<p>(.*?)</p>', str(soup.find_all('p')))
            text = ''
            for y in alexpls2:
                text += y + '\n'
            thingy = re.findall(
                '<a.*?>(.*?)</a>', str(soup.find_all('a', attrs={'href': re.compile('/u/*')})))
            text = re.sub('</?em>', '*', text)
            text = re.sub('</?strong>', '**', text)
            fmt = 'Fanfuck: **{}** by **{}**\n'.format(re.sub(', a .*? fanfic \| FanFiction', '', soup.title.text), thingy[0])
            fmt += text + '\n'
            fmt += 'Url: {}'.format(newurl)
            await self.bot.say(fmt)
        except ValueError:
            await self.bot.say("Sorry, but no fanfucks were found with the name: **{}**".format(errorthing))

    # @commands.command(pass_context=True, aliases=['im', 'photo', 'img'])
    # async def image(self, ctx, *, text:str):
    #     requrl = "https://api.cognitive.microsoft.com/bing/v5.0/images/search?q={}".format(text)
    #     res = await aiohttp.get(requrl, headers={"Ocp-Apim-Subscription-Key": "c84ce598f43143bbb35031a134d7ddd1"})
    #     strem = await res.json()
    #     print(strem)
    #     images = strem["value"]
    #     randimg = random.choice(images)
    #     async with aiohttp.get(randimg["contentUrl"]) as r:
    #         data = await r.read()
    #         img = BytesIO(data)
    #     await self.bot.upload(img)