Python urllib2.urlopen() Examples

The following are 30 code examples showing how to use urllib2.urlopen(). They are extracted from open source projects; each example notes its source project, file, and license.

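Before the examples, a minimal sketch of the typical call pattern (the URL and timeout below are placeholder values, not taken from any project on this page): urlopen() accepts a URL string or a urllib2.Request object and returns a file-like response object; read() gives the body and getcode() the HTTP status. HTTPError must be caught before URLError, since it is a subclass.

import urllib2

# Minimal sketch (Python 2): fetch a page and handle the common failure modes.
# The URL and timeout are placeholder values.
try:
    response = urllib2.urlopen('http://www.example.com', timeout=10)
    print(response.getcode())   # HTTP status code, e.g. 200
    html = response.read()      # response body as a byte string
except urllib2.HTTPError as e:
    print('Server returned an error: %s' % e.code)
except urllib2.URLError as e:
    print('Failed to reach the server: %s' % e.reason)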

Example 1
Project: PiPark   Author: Humpheh   File: senddata.py    License: GNU General Public License v2.0
def post_request(vals, url):
    """
    Build a post request.

    Args:
        vals: Dictionary of (field, values) for the POST
            request.
        url: URL to send the data to.

    Returns:
        Dictionary of JSON response or error info.
    """
    # Build the request and send to server
    data = urllib.urlencode(vals)
    
    try:
        request  = urllib2.Request(url, data)
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as err:
        return {"error": err.reason, "error_code": err.code} 
Example 2
Project: L.E.S.M.A   Author: NatanaelAntonioli   File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    License: Apache License 2.0
def run(self):
		request = self.request
		try:
			if ((timeit.default_timer() - self.starttime) <= self.timeout and
					not SHUTDOWN_EVENT.isSet()):
				try:
					f = urlopen(request)
				except TypeError:
					# PY24 expects a string or buffer
					# This also causes issues with Ctrl-C, but we will concede
					# for the moment that Ctrl-C on PY24 isn't immediate
					request = build_request(self.request.get_full_url(),
											data=request.data.read(self.size))
					f = urlopen(request)
				f.read(11)
				f.close()
				self.result = sum(self.request.data.total)
			else:
				self.result = 0
		except (IOError, SpeedtestUploadTimeout):
			self.result = sum(self.request.data.total) 
Example 3
Project: neural-fingerprinting   Author: StephanZheng   File: download_images.py    License: BSD 3-Clause "New" or "Revised" License
def download_image(image_id, url, x1, y1, x2, y2, output_dir):
    """Downloads one image, crops it, resizes it and saves it locally."""
    output_filename = os.path.join(output_dir, image_id + '.png')
    if os.path.exists(output_filename):
        # Don't download image if it's already there
        return True
    try:
        # Download image
        url_file = urlopen(url)
        if url_file.getcode() != 200:
            return False
        image_buffer = url_file.read()
        # Crop, resize and save image
        image = Image.open(BytesIO(image_buffer)).convert('RGB')
        w = image.size[0]
        h = image.size[1]
        image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w),
                            int(y2 * h)))
        image = image.resize((299, 299), resample=Image.ANTIALIAS)
        image.save(output_filename)
    except IOError:
        return False
    return True 
Example 4
Project: CAMISIM   Author: CAMI-challenge   File: update.py    License: Apache License 2.0
def verifyChecksumForDir(dirName, settings, type):
    """
        For each file listed in a list, verifies its checksum.
    """

    dirName = os.path.join(settings.getLocalDst(type), dirName)

    urlChecksumFile = settings.getRemoteSrc() + '/' + os.path.basename(dirName) + '.checksum'
    print("Verification of the decompressed directory '%s' started." % dirName)
    try:
        for line in urllib2.urlopen(urlChecksumFile):
            f, checksum = line.split('\t')
            f = os.path.join(dirName, f)
            # if checksum.strip() != hashlib.md5(open(f).read()).hexdigest():
            if checksum.strip() != getChecksumForFile(f):
                raise Exception("File '%s' is corrupted, it has a wrong checksum." % f)
    except Exception as e:
        print("Unable to verify directory: %s" % dirName)
        raise e

    print("Checksum verification completed successfully!") 
Example 5
Project: CAMISIM   Author: CAMI-challenge   File: get_genomes.py    License: Apache License 2.0
def download_genome(genome, out_path):
    genome_path = os.path.join(out_path,"genomes")
    out_name = genome.rstrip().split('/')[-1]
    http_address = os.path.join(genome, out_name + "_genomic.fna.gz")
    opened = urllib2.urlopen(http_address)
    out = os.path.join(genome_path, out_name + ".fa")
    tmp_out = os.path.join(genome_path, out_name + "tmp.fa")
    out_gz = out + ".gz"
    with open(out_gz,'wb') as outF:
        outF.write(opened.read())
    gf = gzip.open(out_gz)
    new_out = open(tmp_out,'wb')
    new_out.write(gf.read())
    gf.close()
    os.remove(out_gz)
    new_out.close()
    split_by_N(tmp_out, out)
    return out 
Example 6
Project: dynamic-training-with-apache-mxnet-on-aws   Author: awslabs   File: diagnose.py    License: Apache License 2.0
def test_connection(name, url, timeout=10):
    """Simple connection test"""
    urlinfo = urlparse(url)
    start = time.time()
    try:
        ip = socket.gethostbyname(urlinfo.netloc)
    except Exception as e:
        print('Error resolving DNS for {}: {}, {}'.format(name, url, e))
        return
    dns_elapsed = time.time() - start
    start = time.time()
    try:
        _ = urlopen(url, timeout=timeout)
    except Exception as e:
        print("Error open {}: {}, {}, DNS finished in {} sec.".format(name, url, e, dns_elapsed))
        return
    load_elapsed = time.time() - start
    print("Timing for {}: {}, DNS: {:.4f} sec, LOAD: {:.4f} sec.".format(name, url, dns_elapsed, load_elapsed)) 
Example 7
Project: sqliv   Author: the-robot   File: web.py    License: GNU General Public License v3.0
def gethtml(url, lastURL=False):
    """return HTML of the given url"""

    if not (url.startswith("http://") or url.startswith("https://")):
        url = "http://" + url

    header = useragents.get()
    request = urllib2.Request(url, None, header)
    html = None

    try:
        reply = urllib2.urlopen(request, timeout=10)

    except urllib2.HTTPError as e:
        # read html content anyway for reply with HTTP500
        if e.getcode() == 500:
            html = e.read()
        #print >> sys.stderr, "[{}] HTTP error".format(e.code)
        pass 
Example 8
Project: sqliv   Author: the-robot   File: reverseip.py    License: GNU General Public License v3.0
def reverseip(url):
    """return domains from given the same server"""

    # get only domain name
    url = urlparse(url).netloc if urlparse(url).netloc != '' else urlparse(url).path.split("/")[0]

    source = "http://domains.yougetsignal.com/domains.php"
    useragent = useragents.get()
    contenttype = "application/x-www-form-urlencoded; charset=UTF-8"

    # POST method
    opener = urllib2.build_opener(
        urllib2.HTTPHandler(), urllib2.HTTPSHandler())
    data = urllib.urlencode([('remoteAddress', url), ('key', '')])

    request = urllib2.Request(source, data)
    request.add_header("Content-type", contenttype)
    request.add_header("User-Agent", useragent)

    try:
        result = urllib2.urlopen(request).read()

    except urllib2.HTTPError as e:
        print >> sys.stderr, "[{}] HTTP error".format(e.code) 
Example 9
Project: sqliv   Author: the-robot   File: yahoo.py    License: GNU General Public License v3.0
def search(self, query, per_page=10, pages=1):
        """search urls from yahoo search"""

        # store searched urls
        urls = []

        for page in range(pages):
            yahoosearch = self.yahoosearch % (query, per_page, (page+1)*10)

            request = urllib2.Request(yahoosearch)
            request.add_header("Content-type", self.contenttype)
            request.add_header("User-Agent", self.useragent)

            result = urllib2.urlopen(request).read()
            urls += self.parse_links(result)

        return urls 
Example 10
Project: macops   Author: google   File: can_haz_image.py    License: Apache License 2.0
def DownloadFile(self, fileurl, dlfile):
    """Downloads a given file to a given path/filename.

    Args:
      fileurl: String with URL of file to download.
      dlfile: String with path of file to be written to.
    Raises:
      OSError: If file cannot be opened/written to, function raises OSError.
      URLError: If URL cannot be opened, function raises URLError.
    """
    if not os.path.isfile(dlfile) or dlfile == TMPINDEX:
      print 'Downloading %s ...' % fileurl
      file_to_dl = urllib2.urlopen(fileurl)
      tmpfile = open(dlfile, 'wb')
      shutil.copyfileobj(file_to_dl, tmpfile)
    else:
      print '%s exists' % dlfile 
Example 11
Project: subtake   Author: kp625544   File: subtake.py    License: GNU General Public License v2.0
def subdomain_check(subdomains):
    # Basic query
    for subd in range(len(subdomains)):
        if subd != 0:
            try:
                #print("inside query")
                for rdata in dns.resolver.query(subdomains[subd], 'CNAME') :
                    print "Checking subdomain takeover on:  "+str(subdomains[subd])
                    try:
                        #response = urlopen("http://"+str(rdata.target))
                        response = urlopen("http://"+str(subdomains[subd]))
                        print(R+str(subdomains[subd])+"  seems Up and running fine")
                    except:
                        print(S+"Success!!! Possible sub-domain takeover on:     "+str(subdomains[subd])+E)
            except:
                print (R+"No CNAME for"+str(subdomains[subd])+"i.e. subdomain takeover not Possible") 
Example 12
Project: awesome-zio   Author: aparo   File: metadata.py    License: Apache License 2.0
def query(owner, name):
    if fake:
        print '    {0}/{1}: ok'.format(owner, name)
        return (random.randint(1, 1000), random.randint(1, 300))
    else:
        try:
            req = urllib2.Request('https://api.github.com/repos/{0}/{1}'.format(owner, name))
            if user is not None and token is not None:
                b64 = base64.encodestring('{0}:{1}'.format(user, token)).replace('\n', '')
                req.add_header("Authorization", "Basic {0}".format(b64))
            u = urllib2.urlopen(req)
            j = json.load(u)
            t = datetime.datetime.strptime(j['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
            days = max(int((now - t).days), 0)
            print '    {0}/{1}: ok'.format(owner, name)
            return (int(j['stargazers_count']), days)
        except urllib2.HTTPError as e:
            print '    {0}/{1}: FAILED'.format(owner, name)
            return (None, None) 
Example 13
Project: tdw   Author: tdw1980   File: krasfs.py    License: GNU General Public License v3.0
def upd(category, sort, str):
		post = urllib.urlencode({'checkbox_ftp':'on', 'checkbox_tor':'on','word':str}) 
		request = urllib2.Request('http://krasfs.ru/search.php?key=newkey')#url, post)

		request.add_header('User-Agent', 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)') 
		request.add_header('Host',    'www.krasfs.ru') 
		request.add_header('Accept', '*/*') 
		request.add_header('Accept-Language', 'ru-RU') 
		request.add_header('Referer',    'http://www.krasfs.ru') 

		try: 
			f = urllib2.urlopen(request) 
			html = f.read()
			html = html.replace(chr(10),"")
			n=html.find("<newkey>")
			k=html.find("</newkey>")
			key = html[n+8:k]
		except IOError as e:
			if hasattr(e, 'reason'):
				print 'We failed to reach a server. Reason: ' + str(e.reason)
			elif hasattr(e, 'code'):
				print 'The server couldn\'t fulfill the request. Error code: ' + str(e.code)
			key = "59165b78-bf91-11e1-86bf-c6ab051766ba" 
Example 14
Project: script.module.inputstreamhelper   Author: emilsvennesson   File: utils.py    License: MIT License
def _http_request(url, headers=None, time_out=10):
    """Perform an HTTP request and return request"""
    log(0, 'Request URL: {url}', url=url)

    try:
        if headers:
            request = Request(url, headers=headers)
        else:
            request = Request(url)
        req = urlopen(request, timeout=time_out)
        log(0, 'Response code: {code}', code=req.getcode())
        if 400 <= req.getcode() < 600:
            raise HTTPError('HTTP %s Error for url: %s' % (req.getcode(), url), response=req)
    except (HTTPError, URLError) as err:
        log(2, 'Download failed with error {}'.format(err))
        if yesno_dialog(localize(30004), '{line1}\n{line2}'.format(line1=localize(30063), line2=localize(30065))):  # Internet down, try again?
            return _http_request(url, headers, time_out)
        return None

    return req 
Example 15
Project: dump1090-tools   Author: mutability   File: fetch-dump1090-max-range.py    License: ISC License
def get_max_range(baseurl):
    with closing(urlopen(baseurl + '/data/receiver.json', None, 5.0)) as f:
        receiver = json.load(f)

        if not (receiver.has_key('lat') and receiver.has_key('lon')):
            return None

        rlat = receiver['lat']
        rlon = receiver['lon']

        maxrange = None
        with closing(urlopen(baseurl + '/data/aircraft.json', None, 5.0)) as f:
            aircraft = json.load(f)
            for ac in aircraft['aircraft']:
                if ac.has_key('seen_pos') and ac['seen_pos'] < 300:
                    alat = ac['lat']
                    alon = ac['lon']
                    distance = greatcircle(rlat, rlon, alat, alon)
                    if maxrange is None or distance > maxrange:
                        maxrange = distance
                        
        return maxrange 
Example 16
Project: serializekiller   Author: johndekroon   File: serializekiller.py    License: The Unlicense
def jenkins(url, port):
    try:
        cli_port = False
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        try:
            output = urllib2.urlopen('https://'+url+':'+port+"/jenkins/", context=ctx, timeout=8).info()
            cli_port = int(output['X-Jenkins-CLI-Port'])
        except urllib2.HTTPError as e:
            if e.getcode() == 404:
                try:
                    output = urllib2.urlopen('https://'+url+':'+port, context=ctx, timeout=8).info()
                    cli_port = int(output['X-Jenkins-CLI-Port'])
                except:
                    pass
        except:
            pass 
Example 17
Project: OpenTrader   Author: OpenTrading   File: OTPpnAmgc.py    License: GNU Lesser General Public License v3.0
def lPullYahooToTxtfile(sSymbol):
    '''
        Use this to dynamically pull data for a given sSymbol.
    '''
    try:
        print 'Currently Pulling', sSymbol
        print str(datetime.datetime.fromtimestamp(int(time.time())).strftime('%Y-%m-%d %H:%M:%S'))
        #Keep in mind this is close high low open, lol.
        urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+sSymbol+'/chartdata;type=quote;range=10y/csv'
        lStockLines = []
        try:
            sourceCode = urllib2.urlopen(urlToVisit).read()
            splitSource = sourceCode.split('\n')
            for eachLine in splitSource:
                splitLine = eachLine.split(',')
                if len(splitLine) == 6:
                    if 'values' not in eachLine:
                        lStockLines.append(eachLine)
            return lStockLines
        except Exception as e:
            print str(e), 'failed to organize pulled data.'
    except StandardError as e:
        print str(e), 'failed to pull pricing data' 
Example 18
Project: fetchLandsatSentinelFromGoogleCloud   Author: vascobnunes   File: fels.py    License: MIT License
def download_metadata_file(url, outputdir, program):
    """Download and unzip the catalogue files."""
    zipped_index_path = os.path.join(outputdir, 'index_' + program + '.csv.gz')
    if not os.path.isfile(zipped_index_path):
        if not os.path.exists(os.path.dirname(zipped_index_path)):
            os.makedirs(os.path.dirname(zipped_index_path))
        print("Downloading Metadata file...")
        content = urlopen(url)
        with open(zipped_index_path, 'wb') as f:
            shutil.copyfileobj(content, f)
    index_path = os.path.join(outputdir, 'index_' + program + '.csv')
    if not os.path.isfile(index_path):
        print("Unzipping Metadata file...")
        with gzip.open(zipped_index_path) as gzip_index, open(index_path, 'wb') as f:
            shutil.copyfileobj(gzip_index, f)
    return index_path 
Example 19
Project: hsds   Author: HDFGroup   File: get_s3_stats.py    License: Apache License 2.0
def get_remote_info_json(jfname):
   try:
      logging.info('loading example '+jfname)
      rfo = urllib.urlopen(jfname)
      di = json.loads(rfo.read())
      nat, glbs = 0, 0
      for k,v in di.items():
        if k not in ('dimensions', 'variables'):
            glbs +=1 
      for k,v in di['variables'].items():
         for a in v: nat += 1  
      dims = [ l for k, v in di['dimensions'].items() for d, l in v.items() if d == 'length' ]
      return { 'num global attr' : glbs, 'num vars' : len(di['variables'].keys()), 'num dims' : \
               len(di['dimensions'].keys()), 'ave attrs per var' : nat / len(di['variables'].keys()), \
               'dims sizes' : dims }
   except Exception as e:
      logging.warn("WARN get_remote_info_json on %s : %s, update S3 bucket" % (jfname, str(e)))
      return {}

#--------------------------------------------------------------------------------- 
Example 20
Project: awesome-scala   Author: lauris   File: metadata.py    License: Apache License 2.0
def query(owner, name):
    if fake:
        print("    {0}/{1}: ok".format(owner, name))
        return (random.randint(1, 1000), random.randint(1, 300))
    else:
        try:
            req = urllib2.Request(
                "https://api.github.com/repos/{0}/{1}".format(owner, name)
            )
            if user is not None and token is not None:
                b64 = base64.encodestring("{0}:{1}".format(user, token)).replace(
                    "\n", ""
                )
                req.add_header("Authorization", "Basic {0}".format(b64))
            u = urllib2.urlopen(req)
            j = json.load(u)
            t = datetime.datetime.strptime(j["updated_at"], "%Y-%m-%dT%H:%M:%SZ")
            days = max(int((now - t).days), 0)
            print("    {0}/{1}: ok".format(owner, name))
            return (int(j["stargazers_count"]), days)
        except urllib2.HTTPError as e:
            print("    {0}/{1}: FAILED".format(owner, name))
            return (None, None) 
Example 21
Project: llvm-zorg   Author: llvm   File: build.py    License: Apache License 2.0
def http_download(url, dest):
    """Safely download url to dest.

    Print error and exit if download fails.
    """
    try:
        print("GETting", url, "to", dest, "...", end=' ')
        f = urlopen(url)
        # Open our local file for writing
        with open(dest, "wb") as local_file:
            local_file.write(f.read())

    except HTTPError as e:
        print()
        print("HTTP Error:", e.code, url)
        sys.exit(1)

    except URLError as e:
        print()
        print("URL Error:", e.reason, url)
        sys.exit(1)
    print("done.") 
Example 22
Project: RF-Monitor   Author: EarToEarOak   File: push.py    License: GNU General Public License v2.0
def __send(self, uri, data):
        req = urllib2.Request(uri)
        req.add_header('Content-Type', 'application/json')

        event = None
        try:
            urllib2.urlopen(req, data)
        except ValueError as error:
            event = Event(Events.PUSH_ERROR, msg=error.message)
        except URLError as error:
            event = Event(Events.PUSH_ERROR, msg=error.reason.strerror)

        if event is not None:
            self._failed.append(data)
            post_event(self._handler, event) 
Example 23
Project: Learning-Concurrency-in-Python   Author: PacktPublishing   File: ioBottleneck2.py    License: MIT License
def getLinks():
  req = urllib2.urlopen('http://www.example.com')
  soup = BeautifulSoup(req.read())
  for link in soup.findAll('a'):
    linkArray.append(link.get('href'))
    print(len(linkArray)) 
Example 24
Project: L.E.S.M.A   Author: NatanaelAntonioli   File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    License: Apache License 2.0
def catch_request(request):
	"""Helper function to catch common exceptions encountered when
	establishing a connection with an HTTP/HTTPS request

	"""

	try:
		uh = urlopen(request)
		return uh, False
	except HTTP_ERRORS:
		e = get_exception()
		return None, e 
Example 25
Project: L.E.S.M.A   Author: NatanaelAntonioli   File: L.E.S.M.A. - Fabrica de Noobs Speedtest.py    License: Apache License 2.0
def run(self):
		try:
			if (timeit.default_timer() - self.starttime) <= self.timeout:
				f = urlopen(self.request)
				while (not SHUTDOWN_EVENT.isSet() and
						(timeit.default_timer() - self.starttime) <=
						self.timeout):
					self.result.append(len(f.read(10240)))
					if self.result[-1] == 0:
						break
				f.close()
		except IOError:
			pass 
Example 26
Project: CAMISIM   Author: CAMI-challenge   File: update.py    License: Apache License 2.0
def __init__(self):
        self._url = "http://algbio.cs.uni-duesseldorf.de/software/ppsp"
        self._customVal = ':'
        self._refVersion = None
        try:
            self._version = urllib2.urlopen(self._url + '/' + 'version.txt').read()
        except urllib2.HTTPError:
            self._version = "1_4"
            print("Can't get the current version from the server, version '%s' will be considered." % self._version) 
Example 27
Project: treelstm.pytorch   Author: dasguptar   File: download.py    License: MIT License
def download(url, dirpath):
    filename = url.split('/')[-1]
    filepath = os.path.join(dirpath, filename)
    try:
        u = urllib2.urlopen(url)
    except Exception as e:
        print("URL %s failed to open" % url)
        raise Exception
    try:
        f = open(filepath, 'wb')
    except Exception as e:
        print("Cannot write %s" % filepath)
        raise Exception
    try:
        filesize = int(u.info().getheaders("Content-Length")[0])
    except Exception as e:
        print("URL %s failed to report length" % url)
        raise Exception
    print("Downloading: %s Bytes: %s" % (filename, filesize))

    downloaded = 0
    block_sz = 8192
    status_width = 70
    while True:
        buf = u.read(block_sz)
        if not buf:
            print('')
            break
        else:
            print('', end='\r')
        downloaded += len(buf)
        f.write(buf)
        status = (("[%-" + str(status_width + 1) + "s] %3.2f%%") %
                  ('=' * int(downloaded / filesize * status_width) + '>',
                   downloaded * 100. / filesize))
        print(status, end='')
        sys.stdout.flush()
    f.close()
    return filepath 
Example 28
Project: sqliv   Author: the-robot   File: google.py    License: GNU General Public License v3.0
def get_page(url):
    """
    Request the given URL and return the response page, using the cookie jar.

    @type  url: str
    @param url: URL to retrieve.

    @rtype:  str
    @return: Web page retrieved for the given URL.

    @raise IOError: An exception is raised on error.
    @raise urllib2.URLError: An exception is raised on error.
    @raise urllib2.HTTPError: An exception is raised on error.
    """
    request = Request(url)
    request.add_header('User-Agent',
                       'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)')
    cookie_jar.add_cookie_header(request)
    response = urlopen(request)
    cookie_jar.extract_cookies(response, request)
    html = response.read()
    response.close()
    cookie_jar.save()
    return html


# Filter links found in the Google result pages HTML code.
# Returns None if the link doesn't yield a valid result. 
Example 29
Project: sqliv   Author: the-robot   File: bing.py    License: GNU General Public License v3.0
def get_page(self, URL):
        '''
        :type URL : str
        :param URL: URL to get HTML source 

        :rtype: str
        '''

        request = urllib2.Request(URL, headers=self.default_headers())
        resp    = urllib2.urlopen(request)

        return resp.read() 
Example 30
Project: CyberTK-Self   Author: CyberTKR   File: Self.py    License: GNU General Public License v2.0
def translate(to_translate, to_language="auto", language="auto"):
    bahasa_awal = "auto"
    bahasa_tujuan = to_language
    kata = to_translate
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    return result
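
A closing note: urllib2 exists only in Python 2. In Python 3 the module was split into urllib.request (for urlopen() and Request) and urllib.error (for the exception classes), so the same pattern looks like the minimal sketch below (the URL and User-Agent header are placeholder values):

from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError

try:
    # Build the request with an explicit User-Agent (placeholder value)
    req = Request('http://www.example.com', headers={'User-Agent': 'Mozilla/5.0'})
    with urlopen(req, timeout=10) as response:
        html = response.read()
except HTTPError as e:
    print('Server returned an error: %s' % e.code)
except URLError as e:
    print('Failed to reach the server: %s' % e.reason)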