Python Django request.GET Examples

The following are 16 code examples using Django's request.GET — the dictionary-like object holding an HTTP request's query-string parameters inside a Django view. (Note: despite the common mislabeling, these are not urllib.request examples; urllib.request has no GET() function.) You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example #1
Source File: views.py    From coursys with GNU General Public License v3.0 6 votes vote down vote up
def found(request):
    """
    Handle the enter-search/press-enter behaviour in the autocomplete box.

    Requires a 'search' GET parameter; renders up to 200 matching people,
    each annotated with its RA-appointment count.
    """
    search = request.GET.get('search')
    if search is None:
        return ForbiddenResponse(request, 'must give search in query')

    query = get_query(search, ['userid', 'emplid', 'first_name', 'last_name'])
    matches = Person.objects.filter(query)[:200]

    # Decorate each match with the number of (non-deleted) RA appointments
    # visible to this user's units.
    for person in matches:
        person.ras = RAAppointment.objects.filter(
            unit__in=request.units, person=person, deleted=False).count()

    return render(request, 'ra/found.html', {'people': matches})


# This is an index of all RA appointments belonging to a given person.
Example #2
Source File: views.py    From coursys with GNU General Public License v3.0 5 votes vote down vote up
def browse(request):
    """
    RA appointment browse page.

    AJAX requests for the table contents (flagged by a 'tabledata' GET
    parameter) are delegated to the JSON data view; otherwise the browse
    form is rendered.
    """
    if 'tabledata' in request.GET:
        return RADataJson.as_view()(request)

    context = {
        'form': RABrowseForm(),
        'supervisor_only': not request.units,
    }
    return render(request, 'ra/browse.html', context)
Example #3
Source File: views.py    From coursys with GNU General Public License v3.0 5 votes vote down vote up
def filter_queryset(self, qs):
    """
    Restrict the RAAppointment queryset to what this user may see, then
    apply the 'current contracts' and free-text search filters taken from
    the request's GET parameters.
    """
    params = self.request.GET

    # Visible: appointments in the user's units, or ones they supervise.
    visible = (Q(unit__in=self.request.units)
               | Q(hiring_faculty__userid=self.request.user.username))
    qs = qs.filter(visible).exclude(deleted=True)

    # Keep only "current" contracts, with a 14-day fudge window on both
    # the start and end dates.
    if params.get('current') == 'yes':
        today = datetime.date.today()
        fudge = datetime.timedelta(days=14)
        qs = qs.filter(start_date__lte=today + fudge,
                       end_date__gte=today - fudge)

    # Search box: get the RA candidate set from haystack and use its PKs
    # to limit our query.
    term = params.get('sSearch', None)
    if term:
        hits = SearchQuerySet().models(RAAppointment).filter(text__fuzzy=term)[:500]
        hits = [h for h in hits if h is not None]
        if not hits:
            qs = qs.none()
        else:
            # ignore very low scores: elasticsearch grabs too much sometimes
            best = max(h.score for h in hits)
            keep = (h.pk for h in hits if h.score > best / 5)
            qs = qs.filter(pk__in=keep)

    return qs
Example #4
Source File: views.py    From coursys with GNU General Public License v3.0 5 votes vote down vote up
def person_info(request):
    """
    Get more info about this person, for AJAX updates on the new RA form.

    Responds with JSON: the person's grad programs (for this user's units)
    and, when available, citizenship/visa info from SIMS. An invalid or
    missing emplid yields an empty 'programs' list.
    """
    result = {'programs': []}
    emplid = request.GET.get('emplid', None)

    # Only look things up for a well-formed 9-digit emplid.
    if emplid and emplid.isdigit() and len(emplid) == 9:
        # grad programs
        grad_students = GradStudent.objects.filter(
            person__emplid=emplid, program__unit__in=request.units)
        result['programs'] = [
            {
                'program': gs.program.label,
                'unit': gs.program.unit.name,
                'status': gs.get_current_status_display(),
            }
            for gs in grad_students
        ]

        # other SIMS info; report failures inside the JSON payload
        try:
            result.update(more_personal_info(emplid, needed=['citizen', 'visa']))
        except SIMSProblem as e:
            result['error'] = str(e)

    return HttpResponse(json.dumps(result), content_type='application/json;charset=utf-8')
Example #5
Source File: quick_search.py    From coursys with GNU General Public License v3.0 5 votes vote down vote up
def quick_search(request):
    """
    Grad-student quick-search endpoint.

    With a 'term' parameter: return a JSON list of autocomplete matches.
    With a 'search' parameter: redirect to the matching student's page, or
    to the not-found page. Otherwise: forbidden.
    """
    if 'term' in request.GET:
        term = request.GET['term']
        # take more here so the sorting gets more useful students: trim to 50 top later
        matches = GradStudent.objects.filter(program__unit__in=request.units) \
                .filter(_get_query(term)) \
                .select_related('person', 'program')[:500]

        # sort according to ACTIVE_STATUS_ORDER to get useful students at
        # the top: decorate with order, sort, trim, and build the response
        decorated = [(ACTIVE_STATUS_ORDER[gs.current_status], gs) for gs in matches]
        decorated.sort()
        top = decorated[:50]

        data = [
            {
                'value': str(gs.slug),
                'label': "%s, %s, %s" % (gs.person.name(), gs.program.label,
                                         gs.get_current_status_display()),
            }
            for _, gs in top
        ]
        response = HttpResponse(content_type='application/json')
        json.dump(data, response, indent=1)
        return response
    elif 'search' in request.GET:
        grad_slug = request.GET['search']
        try:
            grad = GradStudent.objects.get(slug=grad_slug, program__unit__in=request.units)
            return HttpResponseRedirect(reverse('grad:view', kwargs={'grad_slug': grad.slug}))
        except GradStudent.DoesNotExist:
            return HttpResponseRedirect(reverse('grad:not_found') + "?search=" + urllib.parse.quote_plus(grad_slug.encode('utf8')))
    else:
        return ForbiddenResponse(request, 'must send term')
Example #6
Source File: views.py    From mendelmd with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def view(request, pathway_id):
    """
    Show one KEGG pathway, splitting its stored gene list for the template.
    """
    print(pathway_id)
    pathway = Pathway.objects.get(kegg=pathway_id)
    # the genes field is stored as a comma-separated string
    pathway.genes = pathway.genes.split(',')
    print(pathway.genes)
    # NOTE(review): render_to_response(context_instance=...) was removed in
    # Django 1.10+; migrating to render() is advisable — confirm Django version.
    return render_to_response('pathway_analysis/view.html', {'pathway': pathway},
                              context_instance=RequestContext(request))
Example #7
Source File: views.py    From mendelmd with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def view(request, pathway_id):
    """
    Show one KEGG pathway, splitting its stored gene list for the template.
    """
    print(pathway_id)
    pathway = Pathway.objects.get(kegg=pathway_id)
    # the genes field is stored as a comma-separated string
    pathway.genes = pathway.genes.split(',')
    print(pathway.genes)
    # NOTE(review): render_to_response(context_instance=...) was removed in
    # Django 1.10+; migrating to render() is advisable — confirm Django version.
    return render_to_response('pathway_analysis/view.html', {'pathway': pathway},
                              context_instance=RequestContext(request))
Example #8
Source File: views.py    From mendelmd with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
def view(request, pathway_id):
    """
    Show one KEGG pathway, splitting its stored gene list for the template.
    """
    print(pathway_id)
    pathway = Pathway.objects.get(kegg=pathway_id)
    # the genes field is stored as a comma-separated string
    pathway.genes = pathway.genes.split(',')
    print(pathway.genes)
    # NOTE(review): render_to_response(context_instance=...) was removed in
    # Django 1.10+; migrating to render() is advisable — confirm Django version.
    return render_to_response('pathway_analysis/view.html', {'pathway': pathway},
                              context_instance=RequestContext(request))
Example #9
Source File: views.py    From StockSensation with Apache License 2.0 5 votes vote down vote up
def dicopinionResult(request):
    """
    Dictionary-based sentiment view: scrape post titles for a stock from
    the eastmoney.com message board and tally positive/negative/neutral
    word counts per tracked day.

    Expects 'dicStockNum' in the query string; renders the tallies as JSON
    into the template.
    """
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)

    # Hoisted out of the page loop: the pattern is loop-invariant, so
    # compiling it once avoids repeated recompilation/cache lookups.
    titlePattern = re.compile(
        '<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)

    for pageNum in range(1, 10):  # scrape pages 1..9
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(dicStockNum)+',f_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            for j in range(len(dateCount)):
                # groups 3/4 of the pattern are the post's month and day
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1  # one more post on this day
                    segList = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    for eachItem in segList:
                        if eachItem != ' ':
                            # simple dictionary lookup: first matching polarity wins
                            if eachItem in positiveWord:
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})
Example #10
Source File: views.py    From StockSensation with Apache License 2.0 5 votes vote down vote up
def nbopinionResult(request):
    """
    Naive-Bayes sentiment view: scrape eastmoney message-board titles for a
    stock, classify each title with the pickled model, and tally
    positive/negative/neutral counts per tracked day.

    Expects 'Nb_stock_number' in the query string.
    """
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    # NOTE(review): joblib.load un-pickles arbitrary objects; only safe
    # because these are trusted local model files.
    clf = joblib.load(homedir+'/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir+'/StockVisualData/Vect')
    transformer = joblib.load(homedir+'/StockVisualData/Tfidf')

    # Hoisted out of the page loop: the pattern is loop-invariant.
    titlePattern = re.compile(
        '<span class="l3">(.*?)title="(.*?)"(.*?)<span class="l6">(\d\d)-(\d\d)</span>', re.S)

    for pageNum in range(1, 21):  # scrape pages 1..20
        urlPage = 'http://guba.eastmoney.com/list,' + \
            str(Nb_stock_number)+'_'+str(pageNum)+'.html'
        stockPageRequest = urllib.request.urlopen(urlPage)
        htmlTitleContent = str(stockPageRequest.read(), 'utf-8')
        gotTitle = re.findall(titlePattern, htmlTitleContent)
        for i in range(len(gotTitle)):
            text_predict = []
            for j in range(len(dateCount)):
                # groups 3/4 of the pattern are the post's month and day
                if int(gotTitle[i][3]) == dateCount[j][0] and int(gotTitle[i][4]) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_list = list(jieba.cut(gotTitle[i][1], cut_all=True))
                    seg_text = " ".join(seg_list)
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    # classifier labels: positive / negative / neutral
                    if predicted == '积极':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消极':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})

# Set up the date array (设置时间数组)
Example #11
Source File: views.py    From donate-wagtail with Mozilla Public License 2.0 5 votes vote down vote up
def dispatch(self, request, *args, **kwargs):
    """
    Validate the donation frequency and the amount/currency/source-page
    query parameters before delegating to the normal view dispatch.
    """
    frequency = kwargs['frequency']
    if frequency not in constants.FREQUENCIES:
        raise Http404()
    self.payment_frequency = frequency

    # Ensure that the donation amount, currency and source page are legit;
    # bail out to the home page otherwise.
    start_form = StartCardPaymentForm(request.GET)
    if not start_form.is_valid():
        return HttpResponseRedirect('/')

    cleaned = start_form.cleaned_data
    self.amount = cleaned['amount']
    self.currency = cleaned['currency']
    self.source_page = Page.objects.get(pk=cleaned['source_page_id']).specific
    return super().dispatch(request, *args, **kwargs)
Example #12
Source File: views.py    From coursys with GNU General Public License v3.0 4 votes vote down vote up
def pay_periods(request):
    """
    Calculate number of pay periods between contract start and end dates,
    i.e. number of work days in the period / 10.

    Expects 'start' and 'end' GET parameters in YYYY-MM-DD format and
    responds with a plain-text number. Missing, malformed, or reversed
    (end before start) dates yield an empty response body — the original
    produced nonsense negative values for reversed dates.
    """
    day = datetime.timedelta(days=1)
    week = datetime.timedelta(days=7)
    result = ''
    if 'start' in request.GET and 'end' in request.GET:
        try:
            st = datetime.datetime.strptime(request.GET['start'], "%Y-%m-%d").date()
            en = datetime.datetime.strptime(request.GET['end'], "%Y-%m-%d").date()
        except ValueError:
            st = en = None
        if st is not None and st <= en:
            # move start/end into Mon-Fri work week
            if st.weekday() == 5:
                st += 2*day
            elif st.weekday() == 6:
                st += day
            if en.weekday() == 5:
                en -= day
            elif en.weekday() == 6:
                en -= 2*day

            # number of full weeks (until sameday: last same weekday before end date)
            weeks = ((en-st)/7).days
            sameday = st + weeks*week
            assert sameday <= en < sameday + week

            # number of days remaining after the full weeks
            days = (en - sameday).days
            if sameday.weekday() > en.weekday():
                # don't count weekend days in between
                days -= 2

            days += 1  # count both start and end days
            result = "%.1f" % ((weeks*5 + days)/10.0)

    return HttpResponse(result, content_type='text/plain;charset=utf-8')
Example #13
Source File: views.py    From coursys with GNU General Public License v3.0 4 votes vote down vote up
def _course_info_staff(request, course_slug):
    """
    Course front page (staff view).

    Handles the non-AJAX activity reorder via 'order'/'act' GET parameters
    (see reorder_activity for the AJAX path), then assembles the activity
    list, discussion info, and combined-offering advertisement.
    """
    course = get_object_or_404(CourseOffering, slug=course_slug)
    member = Member.objects.get(offering=course, person__userid=request.user.username, role__in=['INST','TA','APPR'])
    activities = all_activities_filter(offering=course)
    any_group = True in [a.group for a in activities]

    # Non-Ajax way to reorder activity; see also the reorder_activity view
    # for the ajax way to reorder.
    order = request.GET.get('order')
    act = request.GET.get('act')
    if order and act:
        reorder_course_activities(activities, act, order)
        return HttpResponseRedirect(reverse('offering:course_info', kwargs={'course_slug': course_slug}))

    # TODO: is the activity type necessary?
    activities_info = []
    total_percent = 0
    for activity in activities:
        if activity.percent:
            total_percent += activity.percent

        if isinstance(activity, NumericActivity):
            activities_info.append({'activity': activity, 'type': ACTIVITY_TYPE['NG']})
        elif isinstance(activity, LetterActivity):
            activities_info.append({'activity': activity, 'type': ACTIVITY_TYPE['LG']})

    if len(activities) == 0:
        # Bug fix: the original compared a QuerySet to 0 (always False),
        # so the hint message could never appear; count the pages instead.
        num_pages = Page.objects.filter(offering=course).count()
        if num_pages == 0:
            messages.info(request, "Students won't see this course in their menu on the front page. As soon as some activities or pages have been added, they will see a link to the course info page.")

    discussion_activity = False
    if course.discussion:
        discussion_activity = discuss_activity.recent_activity(member)

    # advertise combined offering if applicable.
    offer_combined = course.joint_with() and len(activities) == 0

    context = {'course': course, 'member': member, 'activities_info': activities_info, 'from_page': FROMPAGE['course'],
               'order_type': ORDER_TYPE, 'any_group': any_group, 'total_percent': total_percent, 'discussion_activity': discussion_activity,
               'offer_combined': offer_combined}
    return render(request, "grades/course_info_staff.html", context)
Example #14
Source File: views.py    From coursys with GNU General Public License v3.0 4 votes vote down vote up
def formula_tester(request, course_slug):
    """
    Grade-formula tester page.

    On GET with a 'formula' parameter: validate one value/status sub-form
    per numeric activity plus the formula itself, build fake activities
    from the entered values, and evaluate the formula against them. On a
    plain GET: render blank entry forms.
    """
    course = get_object_or_404(CourseOffering, slug=course_slug)
    numeric_activities = NumericActivity.objects.filter(offering=course, deleted=False)
    result = ""
    
    if 'formula' in request.GET: # If the form has been submitted...
        activity_entries = []
        faked_activities = [] # used to evaluate the formula
        has_error = False
        for numeric_activity in numeric_activities:
            # one sub-form per activity, namespaced by the activity slug
            activity_form_entry = ActivityFormEntry(request.GET, prefix=numeric_activity.slug)
            if not activity_form_entry.is_valid():
                has_error = True
            else:
                # an empty value field is treated as a grade of 0
                value = activity_form_entry.cleaned_data['value']
                if not value:
                    value = 0
                faked_activities.append(FakeActivity(numeric_activity.name, numeric_activity.short_name,
                                                     activity_form_entry.cleaned_data['status'],
                                                     numeric_activity.max_grade, numeric_activity.percent,
                                                     value))
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
            

        formula_form_entry = FormulaFormEntry(request.GET)
        formula_form_entry.activate_form_entry_validation(course_slug, None)
        
        if not formula_form_entry.is_valid():
            has_error = True
        if has_error:
            messages.error(request, "Please correct the error below")
        else:
            # pickled_formula is produced by the form's own validation above,
            # not directly from user input — presumably safe to unpickle;
            # TODO(review): confirm nothing user-controlled reaches it.
            parsed_expr = pickle.loads(formula_form_entry.pickled_formula)
            act_dict = activities_dictionary(faked_activities)
            try:
                result = eval_parse(parsed_expr, FakeEvalActivity(course), act_dict, None, True)
            except EvalException:
                messages.error(request,  "Can not evaluate formula")
    else:
        # initial page load: unbound (blank) forms
        activity_entries = []
        for numeric_activity in numeric_activities:
            activity_form_entry = ActivityFormEntry(prefix=numeric_activity.slug)
            activity_entries.append(FormulaTesterActivityEntry(numeric_activity, activity_form_entry))
        formula_form_entry = FormulaFormEntry()
    context = {'course': course, 'activity_entries': activity_entries,
               'formula_form_entry': formula_form_entry, 'result': result}
    return render(request, 'grades/formula_tester.html', context)
Example #15
Source File: views.py    From StockPredict with Apache License 2.0 4 votes vote down vote up
def dicopinionResult(request):
    """
    Dictionary-based sentiment view (StockPredict variant): scrape the
    eastmoney message board for a stock and tally positive/negative/neutral
    title words per tracked day.

    Expects 'dicStockNum' in the query string.
    """
    dicStockNum = request.GET['dicStockNum']
    dateCount = setDate()
    stock_name = get_stock_name(dicStockNum)

    # Hoisted out of the loops: pattern and xpath are invariant per request.
    date_re = re.compile('(\d+)-(\d+)')
    post_xpath = '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]'

    # Scrape pages 1..9; original TODO: switch to async scraping later.
    for pageNum in range(1, 10):
        print(f'page:{pageNum}')
        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(dicStockNum) + ',f_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        resp = Selector(text=stockPageRequest.text)
        nodes = resp.xpath(post_xpath)

        for item in nodes:
            # only title and date are actually used; the original also
            # extracted view/comment/author counts and discarded them
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # NOTE(review): extract_first() can return None on malformed rows,
            # which would make re.search raise TypeError — confirm upstream.
            date_pattern = date_re.search(create_time)
            month = sub_zero(date_pattern.group(1))
            day = sub_zero(date_pattern.group(2))

            for j in range(len(dateCount)):  # tracked days (5)
                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1  # one more post on this day
                    segList = list(jieba.cut(title, cut_all=True))
                    for eachItem in segList:
                        if eachItem != ' ':
                            # crude dictionary lookup: first matching polarity wins
                            if eachItem in positiveWord:
                                dateCount[j][2] += 1
                                continue
                            elif eachItem in negativeWord:
                                dateCount[j][3] += 1
                                continue
                            elif eachItem in neutralWord:
                                dateCount[j][4] += 1

    # tallies for the most recent 5 days
    print(dateCount)
    return render(request, 'dicopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})
Example #16
Source File: views.py    From StockPredict with Apache License 2.0 4 votes vote down vote up
def nbopinionResult(request):
    """
    Naive-Bayes sentiment view (StockPredict variant): scrape the eastmoney
    message board for a stock, classify each title with the pickled
    classifier pipeline, and tally positive/negative/neutral per day.

    Expects 'Nb_stock_number' in the query string.
    """
    Nb_stock_number = request.GET['Nb_stock_number']
    dateCount = setDate()
    stock_name = get_stock_name(Nb_stock_number)
    homedir = os.getcwd()

    # NOTE(review): joblib.load un-pickles arbitrary objects; only safe
    # because these are trusted local model files.
    clf = joblib.load(homedir + '/StockVisualData/Clf.pkl')
    vectorizer = joblib.load(homedir + '/StockVisualData/Vect')
    transformer = joblib.load(homedir + '/StockVisualData/Tfidf')

    # Hoisted out of the loops: pattern and xpath are invariant per request.
    date_re = re.compile('(\d+)-(\d+)')
    post_xpath = '//div[contains(@class,"articleh normal_post") or contains(@class,"articleh normal_post odd")]'

    for pageNum in range(1, 10):
        urlPage = 'http://guba.eastmoney.com/list,' + \
                  str(Nb_stock_number) + '_' + str(pageNum) + '.html'
        stockPageRequest = requests.get(urlPage, headers=headers)
        resp = Selector(text=stockPageRequest.text)
        nodes = resp.xpath(post_xpath)

        for item in nodes:
            # only title and date are actually used; the original also
            # extracted view/comment/author counts and discarded them
            title = item.xpath('./span[@class="l3 a3"]/a/text()').extract_first()
            create_time = item.xpath('./span[@class="l5 a5"]/text()').extract_first()
            # NOTE(review): extract_first() can return None on malformed rows,
            # which would make re.search raise TypeError — confirm upstream.
            date_pattern = date_re.search(create_time)
            month = sub_zero(date_pattern.group(1))
            day = sub_zero(date_pattern.group(2))

            text_predict = []
            for j in range(len(dateCount)):
                if int(month) == dateCount[j][0] and int(day) == dateCount[j][1]:
                    dateCount[j][5] += 1
                    seg_text = " ".join(jieba.cut(title, cut_all=True))
                    text_predict.append(seg_text)
                    text_predict = np.array(text_predict)
                    text_frequency = vectorizer.transform(text_predict)
                    new_tfidf = transformer.transform(text_frequency)
                    predicted = clf.predict(new_tfidf)
                    # classifier labels: positive / negative / neutral
                    if predicted == '积极':
                        dateCount[j][2] += 1
                        continue
                    elif predicted == '消极':
                        dateCount[j][3] += 1
                        continue
                    elif predicted == '中立':
                        dateCount[j][4] += 1
                    # no confidence score is returned by the classifier

    return render(request, 'nbopinionResult.html', {'stock_name': stock_name, 'dateCount': json.dumps(dateCount)})


# 设置时间数组