Python get posts

60 Python code examples are found related to "get posts". You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
Example 1
Source File: users.py    From circleci-demo-python-flask with MIT License 6 votes vote down vote up
def get_user_posts(id):
    """Return one page of the given user's posts as JSON.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['CIRCULATE_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the route takes an `id` URL argument; without id=id,
        # url_for raises BuildError when building the pagination link.
        prev = url_for('api.get_user_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 2
Source File: posts.py    From circleci-demo-python-flask with MIT License 6 votes vote down vote up
def get_posts():
    """Return one page of all posts as JSON, with prev/next page links."""
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['CIRCULATE_POSTS_PER_PAGE']
    pagination = Post.query.paginate(page, per_page=per_page, error_out=False)
    prev = url_for('api.get_posts', page=page - 1, _external=True) \
        if pagination.has_prev else None
    next = url_for('api.get_posts', page=page + 1, _external=True) \
        if pagination.has_next else None
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 3
Source File: users.py    From circleci-demo-python-flask with MIT License 6 votes vote down vote up
def get_user_followed_posts(id):
    """Return one page of posts authored by users the given user follows.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['CIRCULATE_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the route takes an `id` URL argument; without id=id,
        # url_for raises BuildError when building the pagination link.
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 4
Source File: instaloader.py    From instaloader with MIT License 6 votes vote down vote up
def get_location_posts(self, location: str) -> Iterator[Post]:
        """Yield the Posts Instagram lists for a given location, page by page.

        :return:  Iterator over Posts of a location's posts
        :raises LoginRequiredException: If called without being logged in.

        .. versionadded:: 4.2

        .. versionchanged:: 4.2.9
           Require being logged in (as required by Instagram)
        """
        end_cursor = None
        while True:
            # The first request carries no cursor; later ones resume where
            # the previous page ended.
            params = {'__a': 1}
            if end_cursor:
                params['max_id'] = end_cursor
            media = self.context.get_json('explore/locations/{0}/'.format(location),
                                          params)['graphql']['location']['edge_location_to_media']
            for edge in media['edges']:
                yield Post(self.context, edge['node'])
            page_info = media['page_info']
            if not page_info['has_next_page']:
                break
            end_cursor = page_info['end_cursor']
Example 5
Source File: structures.py    From instaloader with MIT License 6 votes vote down vote up
def get_all_posts(self) -> Iterator[Post]:
        """Yields all posts, i.e. all most recent posts and the top posts, in chronological order.

        Performs a two-pointer merge of the top-posts stream (sorted newest
        first) and the regular posts stream, de-duplicating posts that
        appear in both.
        """
        sorted_top_posts = iter(sorted(self.get_top_posts(), key=lambda p: p.date_utc, reverse=True))
        other_posts = self.get_posts()
        next_top = next(sorted_top_posts, None)
        next_other = next(other_posts, None)
        while next_top is not None or next_other is not None:
            if next_other is None:
                # BUG FIX: the head already fetched into next_top must be
                # emitted before draining the rest of the stream; the
                # original dropped it silently.
                yield next_top
                yield from sorted_top_posts
                break
            if next_top is None:
                # BUG FIX: same as above for the other stream's head.
                yield next_other
                yield from other_posts
                break
            if next_top == next_other:
                # Same post present in both streams: emit once, advance both.
                yield next_top
                next_top = next(sorted_top_posts, None)
                next_other = next(other_posts, None)
                continue
            # Emit whichever head is newer to keep descending date order.
            if next_top.date_utc > next_other.date_utc:
                yield next_top
                next_top = next(sorted_top_posts, None)
            else:
                yield next_other
                next_other = next(other_posts, None)
Example 6
Source File: instaloader.py    From instaloader with MIT License 6 votes vote down vote up
def get_feed_posts(self) -> Iterator[Post]:
        """Get Posts of the user's feed.

        :return: Iterator over Posts of the user's feed.
        :raises LoginRequiredException: If called without being logged in.
        """
        query_hash = "d6f4427fbe92d846298cf93df0b937d3"
        data = self.context.graphql_query(query_hash, {})["data"]
        while True:
            timeline = data["user"]["edge_web_feed_timeline"]
            for edge in timeline["edges"]:
                node = edge["node"]
                # The feed interleaves posts with other node types; only
                # yield nodes that Post can represent.
                is_post = (node.get("__typename") in Post.supported_graphql_types()
                           and node.get("shortcode") is not None)
                if is_post:
                    yield Post(self.context, node)
            page_info = timeline["page_info"]
            if not page_info["has_next_page"]:
                return
            data = self.context.graphql_query(query_hash,
                                              {'fetch_media_item_count': 12,
                                               'fetch_media_item_cursor': page_info["end_cursor"],
                                               'fetch_comment_count': 4,
                                               'fetch_like': 10,
                                               'has_stories': False})["data"]
Example 7
Source File: users.py    From Simpleblog with MIT License 6 votes vote down vote up
def get_user_posts(id):
    """Return one page of the given user's posts as JSON.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the route takes an `id` URL argument; without id=id,
        # url_for raises BuildError when building the pagination link.
        prev = url_for('api.get_user_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 8
Source File: posts.py    From Simpleblog with MIT License 6 votes vote down vote up
def get_posts():
    """Return one page of all posts as JSON, with prev/next page links."""
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['POSTS_PER_PAGE']
    pagination = Post.query.paginate(page, per_page=per_page, error_out=False)
    prev = url_for('api.get_posts', page=page - 1, _external=True) \
        if pagination.has_prev else None
    next = url_for('api.get_posts', page=page + 1, _external=True) \
        if pagination.has_next else None
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 9
Source File: users.py    From Simpleblog with MIT License 6 votes vote down vote up
def get_user_followed_posts(id):
    """Return one page of posts authored by users the given user follows.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the route takes an `id` URL argument; without id=id,
        # url_for raises BuildError when building the pagination link.
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 10
Source File: posts_stats.py    From python-runtime with Apache License 2.0 6 votes vote down vote up
def get_posts_list_unanswered():
    """Return unanswered Stack Overflow questions about Python on GCP.

    Runs a BigQuery query over the public Stack Overflow dataset and
    prefixes every result row with the time the snapshot was taken, so
    rows are (timestamp, id, title, tags).
    """
    query = """
            SELECT
                id, title, tags
            FROM
                `bigquery-public-data.stackoverflow.posts_questions`
            WHERE
                tags LIKE '%python%'
            AND (tags LIKE '%google-cloud-platform%' OR tags LIKE '%gcp%')
            AND accepted_answer_id is NULL
            AND answer_count = 0;
        """
    fetched = bq_utils.execute_query(query)
    # Stamp each row with the query time so later loads can tell when this
    # snapshot was taken.
    now = datetime.datetime.now()
    return [(now,) + row for row in fetched]
Example 11
Source File: wp_xml_parser.py    From wagtail_blog with Apache License 2.0 6 votes vote down vote up
def get_posts_data(self):
        """
        given a WordPress xml export file, will return list
        of dictionaries with keys that match
        the expected json keys of a wordpress API call
        >>> xp = XML_parser('example_export.xml')
        >>> json_vals = {"slug","ID", "title","description", "content", "author", "terms", "date", }
        >>> data = xp.get_posts_data()
        >>> assert [ val in json_vals for val in data[0].keys() ]
        """
        items = self.chan.findall("item") #(e for e in chan.getchildren() if e.tag=='item')
        # Map each generic <item> dict onto the JSON keys a WordPress API
        # call would use, keeping only items that translate successfully.
        # BUG FIX: translate_item() was previously called twice per item
        # (once as the filter, once as the value); call it exactly once.
        all_the_data = []
        for item in items:
            translated = self.translate_item(self.item_dict(item))
            if translated:
                all_the_data.append(translated)
        return all_the_data
Example 12
Source File: wordpress_import.py    From wagtail_blog with Apache License 2.0 6 votes vote down vote up
def get_posts(self):
        """Fetch all posts from the WordPress REST API and process each one.

        Downloads the first page, then — unless self.first_page_only is
        True — walks the remaining pages reported by the X-WP-TotalPages
        response header.
        """
        params = {"per_page": self.per_page, "_embed": "1"}
        endpoint = self.url + "/posts"
        resp = requests.get(endpoint, headers=self.get_headers(), params=params)
        # BUG FIX: the X-WP-TotalPages header may be absent (proxy, error
        # response), and int(None) raises TypeError.  Treat a missing
        # header as a single page.
        total_pages = int(resp.headers.get("X-WP-TotalPages") or 1)
        first_page = json.loads(resp.content)

        for post in first_page:
            self.process_post(post)

        if self.first_page_only is False:
            # Pages are 1-based and page 1 was already processed above.
            for i in range(total_pages - 1):
                params["page"] = i + 2
                resp = requests.get(endpoint, headers=self.get_headers(), params=params)
                page = json.loads(resp.content)
                for post in page:
                    self.process_post(post)
Example 13
Source File: zhihu.py    From zhihu-python with MIT License 6 votes vote down vote up
def get_all_posts(self):
        # Generator yielding a Post for every article in this zhihu column.
        # NOTE: Python 2 source (print statement, xrange).
        posts_num = self.get_posts_num()
        if posts_num == 0:
            print "No posts."
            return
            # Unreachable `yield` keeps this function a generator even on
            # the empty path, so callers can always iterate the result.
            yield
        else:
            # API pages 20 posts at a time; (posts_num - 1) / 20 + 1 is
            # integer ceil(posts_num / 20).
            for i in xrange((posts_num - 1) / 20 + 1):
                parm = {'limit': 20, 'offset': 20*i}
                url = 'https://zhuanlan.zhihu.com/api/columns/' + self.slug + '/posts'
                headers = {
                    'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36",
                    'Host': "www.zhihu.com",
                    'Origin': "http://www.zhihu.com",
                    'Pragma': "no-cache",
                    'Referer': "http://www.zhihu.com/"
                }
                # NOTE(review): verify=False disables TLS certificate
                # verification — a security risk; confirm it is required.
                r = requests.get(url, params=parm, headers=headers, verify=False)
                posts_list = r.json()
                for p in posts_list:
                    post_url = 'https://zhuanlan.zhihu.com/p/' + str(p['slug'])
                    yield Post(post_url)
Example 14
Source File: reddit.py    From mee6 with MIT License 6 votes vote down vote up
async def get_all_subreddits_posts(self):
        """Gather the latest posts for every subreddit any server follows.

        Scans each server that has the Reddit plugin enabled, collects the
        union of its followed subreddits, then fetches each subreddit's
        posts exactly once.

        :return: dict mapping subreddit name -> result of self.get_posts()
        """
        # BUG FIX: the body uses `await`, which is a SyntaxError outside an
        # `async def`; the `async` keyword was missing from the signature.
        all_subreddits = []

        for server in list(self.mee6.servers):
            plugins = await self.mee6.db.redis.smembers('plugins:'+server.id)
            if "Reddit" not in plugins:
                continue

            storage = await self.get_storage(server)
            for subreddit in await storage.smembers('subs'):
                all_subreddits.append(subreddit)

        # De-duplicate so each subreddit is only queried once.
        all_subreddits = set(all_subreddits)
        all_subreddits_posts = {}
        for subreddit in all_subreddits:
            all_subreddits_posts[subreddit] = await self.get_posts(subreddit)

        return all_subreddits_posts
Example 15
Source File: tootbot-heroku.py    From tootbot with GNU General Public License v3.0 6 votes vote down vote up
def get_reddit_posts(subreddit_info):
    """Collect eligible hot submissions from a subreddit.

    Skips NSFW, self, spoiler and stickied posts according to the
    module-level config flags and returns {submission_id: submission}.
    """
    post_dict = {}
    print('[ OK ] Getting posts from Reddit...')
    for submission in subreddit_info.hot(limit=POST_LIMIT):
        # Guard clauses: drop anything the config excludes.
        if submission.over_18 and NSFW_POSTS_ALLOWED is False:
            print('[ OK ] Skipping', submission.id, 'because it is marked as NSFW')
            continue
        if submission.is_self and SELF_POSTS_ALLOWED is False:
            print('[ OK ] Skipping', submission.id, 'because it is a self post')
            continue
        if submission.spoiler and SPOILERS_ALLOWED is False:
            print('[ OK ] Skipping', submission.id, 'because it is marked as a spoiler')
            continue
        if submission.stickied:
            print('[ OK ] Skipping', submission.id, 'because it is stickied')
            continue
        post_dict[submission.id] = submission
    return post_dict
Example 16
Source File: get_posts.py    From collect-social with MIT License 6 votes vote down vote up
def get_posts(graph, db, page_id):
    """Fetch every post of a Facebook page and upsert it into the db.

    :param graph: Graph API client supporting paged .get() calls
    :param db: database handle passed through to update_post
    :param page_id: id of the page whose /posts feed is crawled
    """
    limit = 20

    kwargs = {
        'path': '/' + str(page_id) + '/posts',
        'limit': limit,
        'page': True
    }

    post_data_pages = graph.get(**kwargs)
    updated = 0
    for post_data in post_data_pages:
        posts_data = post_data['data']

        for post in posts_data:
            update_post(db, post, page_id)
            updated += 1

        # BUG FIX: the message was hard-coded as "Updated 100 posts"
        # although the request limit is 20 and pages may be partial;
        # report the actual running total instead.
        print('Updated {} posts'.format(updated))
Example 17
Source File: bwdata.py    From api_sdk with MIT License 6 votes vote down vote up
def get_fb_posts(self, name=None, startDate=None, **kwargs):
        """
        Retrieves the facebook posts component data.

        Args:
            name:           You must pass in a channel / group name (string).
            startDate:      You must pass in a start date (string).

            kwargs:         All other filters are optional and can be found in filters.py.

        Returns:
            A list of facebook authors, each having a dictionary representation of their respective facebook data
        """
        query = self._fill_params(name, startDate, kwargs)
        response = self.project.get(
            endpoint="data/mentions/facebookposts", params=query
        )
        return response["results"]
Example 18
Source File: bwdata.py    From api_sdk with MIT License 6 votes vote down vote up
def get_ig_posts(self, name=None, startDate=None, **kwargs):
        """
        Retrieves the instagram posts component data.

        Args:
            name:           You must pass in a channel / group name (string).
            startDate:      You must pass in a start date (string).

            kwargs:         All other filters are optional and can be found in filters.py.

        Returns:
            A list of instagram authors, each having a dictionary representation of their respective instagram data
        """
        # NOTE(review): unlike get_fb_posts this hits the generic
        # "data/mentions" endpoint rather than an instagram-specific one —
        # confirm that is intentional.
        query = self._fill_params(name, startDate, kwargs)
        response = self.project.get(endpoint="data/mentions", params=query)
        return response["results"]
Example 19
Source File: methods.py    From hivemind with MIT License 6 votes vote down vote up
async def get_ranked_posts(context, sort, start_author='', start_permlink='',
                           limit=20, tag=None, observer=None):
    """Query posts, sorted by given method.

    :param context: request context; must provide the 'db' handle
    :param sort: one of trending/hot/created/promoted/payout/
        payout_comments/muted
    :param start_author: pagination cursor (author part), may be empty
    :param start_permlink: pagination cursor (permlink part), may be empty
    :param limit: page size, capped at 100
    :param tag: optional tag filter
    :param observer: optional account whose perspective applies
    """
    # BUG FIX: the body awaits coroutines, which is only legal inside an
    # `async def`; the `async` keyword was missing from the signature.
    db = context['db']
    observer_id = await get_account_id(db, observer) if observer else None

    assert sort in ['trending', 'hot', 'created', 'promoted',
                    'payout', 'payout_comments', 'muted'], 'invalid sort'
    ids = await cursor.pids_by_ranked(
        context['db'],
        sort,
        valid_account(start_author, allow_empty=True),
        valid_permlink(start_permlink, allow_empty=True),
        valid_limit(limit, 100),
        valid_tag(tag, allow_empty=True),
        observer_id)

    return await load_posts(context['db'], ids)
Example 20
Source File: tasks.py    From CLAtoolkit with GNU General Public License v3.0 6 votes vote down vote up
def get_posts(topic_url):
    """Scrape a bbPress forum topic page and return its replies.

    Each reply is a dict holding its permalink, author profile link,
    post date and raw content HTML.
    """
    soup = make_soup(topic_url)
    forum_lists = soup.findAll("ul", "forums")
    replies = forum_lists[0].findAll("li")

    posts = []
    for reply in replies:
        date_span = reply.find("span", "bbp-reply-post-date")
        if date_span is None:
            # Non-reply <li> elements carry no date; skip them.
            continue
        posts.append({
            'post_permalink': reply.find("a", "bbp-reply-permalink").attrs['href'],
            'post_user_link': reply.find("a", "bbp-author-avatar").attrs['href'],
            'post_date': date_span.string,
            'post_content': str(reply.find("div", "bbp-reply-content")),
        })
    return posts
Example 21
Source File: users.py    From flasky-first-edition with MIT License 6 votes vote down vote up
def get_user_posts(id):
    """Return one page of the given user's posts as JSON (404 on bad id)."""
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    prev = url_for('api.get_user_posts', id=id, page=page-1,
                   _external=True) if pagination.has_prev else None
    next = url_for('api.get_user_posts', id=id, page=page+1,
                   _external=True) if pagination.has_next else None
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 22
Source File: posts.py    From flasky-first-edition with MIT License 6 votes vote down vote up
def get_posts():
    """Return one page of all posts as JSON, with prev/next page links."""
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['FLASKY_POSTS_PER_PAGE']
    pagination = Post.query.paginate(page, per_page=per_page, error_out=False)
    prev = url_for('api.get_posts', page=page - 1, _external=True) \
        if pagination.has_prev else None
    next = url_for('api.get_posts', page=page + 1, _external=True) \
        if pagination.has_next else None
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 23
Source File: users.py    From flasky-first-edition with MIT License 6 votes vote down vote up
def get_user_followed_posts(id):
    """Return one page of posts from users the given user follows."""
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                   _external=True) if pagination.has_prev else None
    next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                   _external=True) if pagination.has_next else None
    return jsonify({
        'posts': [item.to_json() for item in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 24
Source File: finder.py    From Interesting-People-On-Medium with MIT License 6 votes vote down vote up
def get_list_of_latest_posts_ids(usernames):
    """Return the ids of each given Medium user's latest posts.

    :param usernames: iterable of Medium usernames (without the @)
    :return: flat list of post id strings
    """
    print('Retrieving the latest posts...')

    post_ids = []

    for username in usernames:
        url = MEDIUM + '/@' + username + '/latest?format=json'
        response = requests.get(url)
        response_dict = clean_json_response(response)

        # BUG FIX: the bare `except:` swallowed every exception, including
        # KeyboardInterrupt/SystemExit.  Only a missing or malformed
        # payload should fall back to "no posts".
        try:
            posts = response_dict['payload']['references']['Post']
        except (KeyError, TypeError):
            posts = {}

        for key in posts:
            post_ids.append(posts[key]['id'])

    return post_ids


# Returns the list of post responses of a list of posts that are no older than 1 month 
Example 25
Source File: hw3-3-blogPostDAO.py    From learning-mongodb with Eclipse Public License 1.0 6 votes vote down vote up
def get_posts(self, num_posts):
        """Return the num_posts most recent posts, newest first.

        Each post document is reshaped into a plain dict with a formatted
        date string and guaranteed 'tags'/'comments' keys.
        """
        # XXX HW 3.2 Work here to get the posts
        cursor = self.posts.find().sort([("date", DESCENDING)]).limit(num_posts)

        formatted = []
        for doc in cursor:
            doc['date'] = doc['date'].strftime("%A, %B %d %Y at %I:%M%p")
            # Older documents may lack these optional fields.
            doc.setdefault('tags', [])
            doc.setdefault('comments', [])
            formatted.append({
                'title': doc['title'],
                'body': doc['body'],
                'post_date': doc['date'],
                'permalink': doc['permalink'],
                'tags': doc['tags'],
                'author': doc['author'],
                'comments': doc['comments'],
            })

        return formatted


    # find a post corresponding to a particular permalink 
Example 26
Source File: util.py    From bot with GNU General Public License v3.0 6 votes vote down vote up
def get_number_of_posts(browser):
    """Get the number of posts from the profile screen"""
    try:
        # Preferred path: read the count out of the page's embedded
        # graphql user object.
        num_of_posts = getUserData(
            "graphql.user.edge_owner_to_timeline_media.count", browser
        )
    except WebDriverException:
        # Fallback: scrape the visible posts-count text from the DOM; the
        # two XPaths cover alternative page layouts.
        try:
            num_of_posts_txt = browser.find_element_by_xpath(
                read_xpath(get_number_of_posts.__name__, "num_of_posts_txt")
            ).text

        except NoSuchElementException:
            num_of_posts_txt = browser.find_element_by_xpath(
                read_xpath(
                    get_number_of_posts.__name__, "num_of_posts_txt_no_such_element"
                )
            ).text

        # Strip spaces and thousands separators (e.g. "1,234") before
        # parsing the integer.
        num_of_posts_txt = num_of_posts_txt.replace(" ", "")
        num_of_posts_txt = num_of_posts_txt.replace(",", "")
        num_of_posts = int(num_of_posts_txt)

    return num_of_posts
Example 27
Source File: util.py    From blog-a with MIT License 6 votes vote down vote up
def get_labels_of_posts(label_type):
    """
    Get labels (tags or categories) of all posts

    :param label_type: 'tags' or 'categories'
    :return: list of labels with post count
    """
    counts = {}
    for file_name in get_posts_list():
        file_path = os.path.join('posts', file_name)
        meta = render_yaml(read_md_file_head(file_path))
        if not meta:
            continue
        for label in to_list(meta[label_type]):
            counts[label] = counts.get(label, 0) + 1
    return [dict(name=label, count=total) for label, total in counts.items()]
Example 28
Source File: account.py    From TTBot with GNU General Public License v3.0 6 votes vote down vote up
def get_posts(self,ALL=True,MDB=None,count=COUNT_POSTS):
        # Build a crawl-configuration dict consumed by a generic request
        # driver elsewhere in the project; this method performs no I/O
        # itself beyond preparing request headers.
        # NOTE(review): the ALL, MDB and count parameters are unused in the
        # body (page size is hard-coded to 20 in params_func) — presumably
        # interpreted by the caller; confirm before relying on them.
        headers = deepcopy(self.login_headers)
        headers.update(HEADERS)
        return {
            # Cursor-style pagination: x -> max_time, y -> has_more.
            'params_func': lambda x,y:{'max_time':x,'has_more':y,'count':20},
            'var': {
                'max_time': int(time.time()),
                'has_more': True,
            },
            'more': 'has_more',
            'more_out':'data',
            'var_outer':'data',
            'item_out':'thread_list',
            'db_setup': {
                # One collection per account per day.
                'tname': f'{self.name+"_"+self.user_id+"_"+time_to_date(time.time(),format="%Y-%m-%d")}',
                'db': MONGODB['comments'],
                'ticket': 'comment_id',
            },
            'request_kwargs': {
                'headers': headers,
                'JSON': 1,
            },
        }
Example 29
Source File: users.py    From flasky-with-celery with MIT License 6 votes vote down vote up
def get_user_posts(id):
    """Return one page of the given user's posts as JSON.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the links pointed at the wrong endpoint
        # ('api.get_posts') and omitted the route's required `id`
        # argument, so pagination led to a different resource.
        prev = url_for('api.get_user_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 30
Source File: users.py    From flasky-with-celery with MIT License 6 votes vote down vote up
def get_user_followed_posts(id):
    """Return one page of posts authored by users the given user follows.

    :param id: primary key of the user; responds 404 if it does not exist.
    :return: JSON object with 'posts', 'prev'/'next' page URLs and 'count'.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        # BUG FIX: the links pointed at the wrong endpoint
        # ('api.get_posts') and omitted the route's required `id`
        # argument, so pagination led to a different resource.
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1,
                       _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1,
                       _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
Example 31
Source File: convert_medium.py    From xlinkBook with MIT License 6 votes vote down vote up
def get_list_of_latest_posts_ids(usernames):
    """Return the ids of each given Medium user's latest posts.

    :param usernames: iterable of Medium usernames (without the @)
    :return: flat list of post id strings
    """
    post_ids = []

    for username in usernames:
        url = MEDIUM + '/@' + username + '/latest?format=json'
        response = requestGet(url)
        response_dict = clean_json_response(response)

        # BUG FIX: the bare `except:` swallowed every exception, including
        # KeyboardInterrupt/SystemExit.  Only a missing or malformed
        # payload should fall back to "no posts".
        try:
            posts = response_dict['payload']['references']['Post']
        except (KeyError, TypeError):
            posts = {}

        for key in posts:
            post_ids.append(posts[key]['id'])

    return post_ids


# Returns the list of post responses of a list of posts that are no older than 1 month 
Example 32
Source File: convert_medium.py    From xlinkBook with MIT License 6 votes vote down vote up
def get_list_of_latest_posts(usernames):
    """Return the latest post objects of each given Medium user.

    :param usernames: iterable of Medium usernames (without the @)
    :return: flat list of post dicts from the Medium JSON payload
    """
    result = []

    for username in usernames:
        url = MEDIUM + '/@' + username + '/latest?format=json'
        response = requestGet(url)
        response_dict = clean_json_response(response)

        # BUG FIX: the bare `except:` swallowed every exception, including
        # KeyboardInterrupt/SystemExit.  Only a missing or malformed
        # payload should fall back to "no posts".
        try:
            posts = response_dict['payload']['references']['Post']
        except (KeyError, TypeError):
            posts = {}

        for key in posts:
            result.append(posts[key])

    return result


# Returns the list of IDs of the latest posts of a list of users 
Example 33
Source File: convert_zhihu.py    From xlinkBook with MIT License 6 votes vote down vote up
def getPosts(user, postType='members'):
    # Print "| title | url |" lines for every article of a zhihu user,
    # paging 20 at a time up to 1000 articles.
    # NOTE: Python 2 source (print statement, dict.has_key).
    for offset in range(0, 1000, 20):
        url = 'https://www.zhihu.com/api/v4/' + postType + '/' + user + '/articles?include=data%5B*%5D.comment_count%2Csuggest_edit%2Cis_normal%2Cthumbnail_extra_info%2Cthumbnail%2Ccan_comment%2Ccomment_permission%2Cadmin_closed_comment%2Ccontent%2Cvoteup_count%2Ccreated%2Cupdated%2Cupvoted_followees%2Cvoting%2Creview_info%3Bdata%5B*%5D.author.badge%5B%3F(type%3Dbest_answerer)%5D.topics&offset=' + str(offset) + '&limit=20&sort_by=created'
        headers = {'authorization' : 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',
                    'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'}

        #print url
        r = requests.get(url, headers=headers, verify=False)

        if r.status_code == 200:
            jobj = json.loads(r.text)

            if jobj.has_key('data') and len(jobj['data']) > 0:

                for item in jobj['data']:

                    line = ' | ' + item['title'] + ' | ' + item['url'] + ' | '
                    print line.encode('utf-8')
            else:
                # Empty page: no more articles.
                break
        else:
            # Non-200 response: stop paging.
            break
Example 34
Source File: lambda_function.py    From ask-alexa-pykit with MIT License 6 votes vote down vote up
def get_posts_intent_handler(request):
    """Alexa intent handler that reads out a post for a requested category.

    Falls back to the 'new' category whenever the spoken slot value is not
    a known category, and attaches a card with the post's permalink.
    """
    category_text = request.slots['Category']
    # Resolve the raw slot value to a known category, defaulting to 'new'.
    category = category_text if category_text in useful_science.categories else 'new'
    post = useful_science.post_cache.get_post(category)

    card_content = "{0} Link: {1}".format(post['summary'],
                                          post['permalink'])
    card = alexa.create_card(title=post['meta_title'],
                             subtitle=post['categories'],
                             content=card_content)

    return alexa.create_response(message=post['summary'],
                                 end_session=True,
                                 card_obj=card)
Example 35
Source File: structures.py    From instaloader with MIT License 5 votes vote down vote up
def get_posts(self) -> Iterator[Post]:
        """Retrieve all posts from a profile."""
        self._obtain_metadata()
        # Page through the profile timeline via GraphQL; the accessor lambda
        # selects edge_owner_to_timeline_media from each response, and the
        # already-fetched metadata seeds the first page.
        yield from (Post(self._context, node, self) for node in
                    self._context.graphql_node_list("472f257a40c653c64c666ce877d59d2b",
                                                    {'id': self.userid},
                                                    'https://www.instagram.com/{0}/'.format(self.username),
                                                    lambda d: d['data']['user']['edge_owner_to_timeline_media'],
                                                    self._rhx_gis,
                                                    self._metadata('edge_owner_to_timeline_media')))
Example 36
Source File: structures.py    From instaloader with MIT License 5 votes vote down vote up
def get_saved_posts(self) -> Iterator[Post]:
        """Yield the Posts this (logged-in) user has marked as saved."""

        # Saved media is private: only the profile's own login may fetch it.
        if self.username != self._context.username:
            raise LoginRequiredException("--login={} required to get that profile's saved posts.".format(self.username))

        self._obtain_metadata()
        saved_nodes = self._context.graphql_node_list(
            "f883d95537fbcd400f466f63d42bd8a1",
            {'id': self.userid},
            'https://www.instagram.com/{0}/'.format(self.username),
            lambda d: d['data']['user']['edge_saved_media'],
            self._rhx_gis,
            self._metadata('edge_saved_media'))
        for node in saved_nodes:
            yield Post(self._context, node)
Example 37
Source File: structures.py    From instaloader with MIT License 5 votes vote down vote up
def get_igtv_posts(self) -> Iterator[Post]:
        """Yield every IGTV post of this profile.

        .. versionadded:: 4.3"""
        self._obtain_metadata()
        igtv_nodes = self._context.graphql_node_list(
            'bc78b344a68ed16dd5d7f264681c4c76',
            {'id': self.userid},
            'https://www.instagram.com/{0}/channel/'.format(self.username),
            lambda d: d['data']['user']['edge_felix_video_timeline'],
            self._rhx_gis,
            self._metadata('edge_felix_video_timeline'))
        for node in igtv_nodes:
            yield Post(self._context, node, self)
Example 38
Source File: structures.py    From instaloader with MIT License 5 votes vote down vote up
def get_tagged_posts(self) -> Iterator[Post]:
        """Yield every post in which this profile is tagged.

        .. versionadded:: 4.0.7"""
        self._obtain_metadata()
        tagged_nodes = self._context.graphql_node_list(
            "e31a871f7301132ceaab56507a66bbb7",
            {'id': self.userid},
            'https://www.instagram.com/{0}/'.format(self.username),
            lambda d: d['data']['user']['edge_user_to_photos_of_you'],
            self._rhx_gis)
        for node in tagged_nodes:
            # Only attach this profile as owner when it actually owns the post.
            owner = self if int(node['owner']['id']) == self.userid else None
            yield Post(self._context, node, owner)
Example 39
Source File: instaloader.py    From instaloader with MIT License 5 votes vote down vote up
def get_hashtag_posts(self, hashtag: str) -> Iterator[Post]:
        """Get Posts associated with a #hashtag.

        .. deprecated:: 4.4
           Use :meth:`Hashtag.get_posts`."""
        # Thin compatibility shim around the Hashtag class.
        tag = Hashtag.from_name(self.context, hashtag)
        return tag.get_posts()
Example 40
Source File: instaloader.py    From instaloader with MIT License 5 votes vote down vote up
def get_explore_posts(self) -> Iterator[Post]:
        """Yield Posts suggested by Instagram's explore page.

        :return: Iterator over the user's suggested Posts.
        :raises LoginRequiredException: If called without being logged in.
        """
        # The explore JSON carries the rhx_gis token needed for GraphQL paging.
        data = self.context.get_json('explore/', {})
        explore_nodes = self.context.graphql_node_list(
            "df0dcc250c2b18d9fd27c5581ef33c7c",
            {}, 'https://www.instagram.com/explore/',
            lambda d: d['data']['user']['edge_web_discover_media'],
            data.get('rhx_gis'))
        for node in explore_nodes:
            yield Post(self.context, node)
Example 41
Source File: api.py    From PyMedium with MIT License 5 votes vote down vote up
def get_user_or_publication_posts(name):
    """Fetch latest posts for a Medium user (names starting with '@')
    or a publication (any other name)."""
    if not name.startswith("@"):
        # Publications are addressed directly under the root URL.
        return process_post_request(const.ROOT_URL + name)
    # Users get the /latest feed, capped by the request's count parameter.
    count = get_count_parameter()
    return process_post_request(const.ROOT_URL + "{0}/latest?limit={count}".format(name, count=count))
Example 42
Source File: instagram_impl.py    From maskrcnn-modanet with MIT License 5 votes vote down vote up
def get_posts(self, limit: Optional[int] = None, offset: Optional[int] = 0) -> Iterator[Post]:
		"""Retrieve posts from a profile, optionally a slice of them.

		:param limit: maximum number of posts to yield; a falsy value
			(``None``/``0``) means no limit.
		:param offset: number of leading posts to skip (``None`` means 0).

		Fixes over the original: posts are yielded lazily instead of being
		accumulated in a list first (the list defeated the point of the
		generator); ``offset`` is honoured even when no ``limit`` is given
		(it used to be silently ignored); ``offset=None`` no longer raises
		a TypeError on comparison.
		"""
		self._obtain_metadata()
		skip = offset or 0  # tolerate offset=None
		nodes = self._context.graphql_node_list(
			"472f257a40c653c64c666ce877d59d2b",
			{'id': self.userid},
			'https://www.instagram.com/{0}/'.format(self.username),
			lambda d: d['data']['user']['edge_owner_to_timeline_media'],
			self._rhx_gis,
			self._metadata('edge_owner_to_timeline_media'))
		for index, node in enumerate(nodes):
			if index < skip:
				continue
			# limit is only active when truthy, matching the original's
			# ``if limit:`` check (limit=0 meant "no limit").
			if limit and index >= limit + skip:
				break
			yield Post(self._context, node, self)
Example 43
Source File: wrappers.py    From woid with Apache License 2.0 5 votes vote down vote up
def get_top_posts(self):
        """Fetch today's Product Hunt posts and return the raw post list."""
        day = timezone.now().strftime('%Y-%m-%d')
        response = requests.get('https://api.producthunt.com/v1/posts?day=%s' % day, headers=self.headers)
        payload = response.json()
        return payload['posts']
Example 44
Source File: feed_service.py    From C-3PO with MIT License 5 votes vote down vote up
def get_popular_posts(url, n, start, limit):
        """ Retrieves the most popular posts in the past n days"""
        with session_scope() as session:
            try:
                # Window: anything shared up to tomorrow, back n days.
                upper_bound = datetime.now() + timedelta(days=1)
                lower_bound = datetime.now() - timedelta(days=n)
                popular_query = (
                    session.query(UserPosts)
                    .filter(UserPosts.share_date <= upper_bound)
                    .filter(UserPosts.share_date >= lower_bound)
                    .order_by(UserPosts.likes_count.desc())
                )
                total = popular_query.count()
                page = popular_query.offset(start).limit(limit).all()

                paginated_response = get_paginated_response(
                    page, url, start=start, limit=limit, total=total
                )
                paginated_response["posts"] = [
                    format(session, post) for post in paginated_response["posts"]
                ]

                return paginated_response, 200

            except BaseException:
                LOG.error(
                    f"Failed to fetch data with param n = {n}, start = {start}, limit = {limit} . Try later.",
                    exc_info=True,
                )
                return {
                    "status": "fail",
                    "message": "Try again",
                }, 500
Example 45
Source File: feed_service.py    From C-3PO with MIT License 5 votes vote down vote up
def get_underrated_posts(url, start, limit):
        """Return a page of posts ranked by least-popular song first
        (ties broken by most recent share date), plus an HTTP status code.

        :param url: base URL used by get_paginated_response for page links.
        :param start: pagination offset.
        :param limit: page size.
        :returns: ``(response_dict, 200)`` on success, ``(error_dict, 500)``
            on any failure (details go to the log).
        """
        with session_scope() as session:
            try:
                total = session.query(UserPosts).count()
                # NOTE(review): order_by references Song before the joins are
                # chained on; SQLAlchemy collects clauses independently of
                # call order, so this works — confirm before reordering.
                posts = (
                    session.query(UserPosts)
                    .order_by(Song.custom_popularity.asc(), UserPosts.share_date.desc())
                    .join(Link, UserPosts.link_id == Link.id)
                    .join(Song, Link.song_id == Song.id)
                    .offset(start)
                    .limit(limit)
                )

                # NOTE(review): unlike the sibling endpoints there is no
                # ``.all()`` here, so ``posts`` is still a lazy Query;
                # presumably get_paginated_response just iterates it — verify.
                paginated_response = get_paginated_response(
                    posts, url, total, start=start, limit=limit
                )

                paginated_response["posts"] = [
                    format(session, post) for post in paginated_response["posts"]
                ]

                return paginated_response, 200

            except BaseException:
                # Broad catch keeps the endpoint from surfacing a stack trace;
                # the failure is logged with context instead.
                LOG.error(
                    f"Failed to fetch data with param start = {start} limit_ = {limit}. Try later.",
                    exc_info=True,
                )
                response_object = {
                    "status": "fail",
                    "message": "Try again",
                }
                return response_object, 500
Example 46
Source File: feed_service.py    From C-3PO with MIT License 5 votes vote down vote up
def get_frequent_posts(url, start, limit):
        """Return a page of posts ordered by how often their link was shared."""
        with session_scope() as session:
            try:
                total = session.query(UserPosts).count()
                frequent_query = (
                    session.query(UserPosts)
                    .filter(UserPosts.share_date <= datetime.now())
                    .join(UserPosts.link)
                    .order_by(Link.post_count.desc())
                )
                posts = frequent_query.offset(start).limit(limit).all()

                paginated_response = get_paginated_response(
                    posts, url, total, start=start, limit=limit
                )
                paginated_response["posts"] = [
                    format(session, post) for post in paginated_response["posts"]
                ]

                return paginated_response, 200

            except BaseException:
                LOG.error(
                    f"Failed to fetch data with param limit_ = {limit}. Try later.",
                    exc_info=True,
                )
                return {
                    "status": "fail",
                    "message": "Try again",
                }, 500
Example 47
Source File: feed_service.py    From C-3PO with MIT License 5 votes vote down vote up
def get_posts_in_interval(url, start, limit, from_=None, to_=None):
        """Return posts shared between *from_* and *to_*, most-liked first.

        :param url: base URL for pagination links.
        :param start: pagination offset.
        :param limit: page size.
        :param from_: window start; defaults to 7 days before the call.
        :param to_: window end; defaults to the time of the call.
        :returns: ``(response_dict, 200)`` on success, ``(error_dict, 500)``
            on failure.

        Fixes over the original: the defaults used ``datetime.now()`` in the
        signature, so they were evaluated once at import time and the window
        silently went stale in a long-running process; the session obtained
        from ``session_factory()`` was never closed (sibling endpoints use
        ``session_scope()``), leaking a connection per call.
        """
        if from_ is None:
            from_ = datetime.now() - timedelta(days=7)
        if to_ is None:
            to_ = datetime.now()
        session = None
        try:
            session = session_factory()
            query_posts = (
                session.query(UserPosts)
                .filter(UserPosts.share_date >= from_)
                .filter(UserPosts.share_date <= to_)
                .order_by(UserPosts.likes_count.desc())
            )
            total = query_posts.count()
            posts = query_posts.all()

            paginated_response = get_paginated_response(posts, url, total, start, limit)

            paginated_response["posts"] = [
                format(session, post) for post in paginated_response["posts"]
            ]
            return paginated_response, 200

        except BaseException:
            LOG.error(
                f"Failed to fetch data with params from_ = {from_}, to_ = {to_}. Try later.",
                exc_info=True,
            )
            response_object = {
                "status": "fail",
                "message": "Try again",
            }
            return response_object, 500
        finally:
            # Explicit cleanup: this endpoint does not use session_scope().
            if session is not None:
                session.close()
Example 48
Source File: feed_service.py    From C-3PO with MIT License 5 votes vote down vote up
def get_latest_posts(url, start, limit):
        """Return a page of posts ordered newest-first by share date."""
        with session_scope() as session:
            try:
                total = session.query(UserPosts).count()
                latest_query = (
                    session.query(UserPosts)
                    .filter(UserPosts.share_date <= datetime.now())
                    .order_by(UserPosts.share_date.desc())
                )
                posts = latest_query.offset(start).limit(limit).all()
                paginated_response = get_paginated_response(
                    posts, url, total, start, limit
                )
                paginated_response["posts"] = [
                    format(session, post) for post in paginated_response["posts"]
                ]

                return paginated_response, 200

            except BaseException:
                LOG.error(
                    f"Failed to fetch data with param start = {start}, limit = {limit}. Try later.",
                    exc_info=True,
                )
                return {
                    "status": "fail",
                    "message": "Try again",
                }, 500
Example 49
Source File: onlyfans.py    From OnlyFans with GNU General Public License v3.0 5 votes vote down vote up
def get_paid_posts(session, app_token):
    """Scrape media from the account's paid posts.

    :param session: authenticated requests session.
    :param app_token: OnlyFans app token appended to the API URL.
    :returns: the result of ``media_scraper`` for the paid-posts endpoint.

    The original assigned the scraper's result to an unused local and fell
    off the end (returning None); it also built an unused ``directory`` list
    and referenced ``directory.append`` without calling it. All of that dead
    code is removed and the result is returned.
    """
    paid_api = "https://onlyfans.com/api2/v2/posts/paid?limit=100&offset=0&app-token=" + app_token
    return media_scraper(paid_api, session)
Example 50
Source File: index.py    From BikeMaps with MIT License 5 votes vote down vote up
def get_Posts_By_Language_Code(language_code):
        """Return published posts for the given language, newest first.

        'fr' selects French posts; anything else selects all published
        posts that are not French.
        """
        published = Post.objects.filter(published=True)
        if language_code == 'fr':
                return published.filter(language='fr').order_by('-post_date')
        return published.exclude(language='fr').order_by('-post_date')
Example 51
Source File: post.py    From white with GNU General Public License v2.0 5 votes vote down vote up
def get_published_posts_page(self, page=1, perpage=10, category=None):
        """Build a paginated listing of published posts, optionally
        restricted to a category slug.

        :param page: 1-based page number.
        :param perpage: posts per page.
        :param category: optional category slug to filter by.
        :returns: ``(total, Paginator)`` on the normal path.

        NOTE(review): when *category* is given but unknown, this returns a
        bare ``Paginator`` instead of the usual 2-tuple — callers that
        unpack the result will fail on that branch; confirm intent.
        """
        cid = None
        if category:
            real_category = self.category_repo.find_by_slug(category)
            if not real_category:
                # Unknown slug: empty paginator (see NOTE above on return shape).
                return Paginator([], 0, page, perpage, '/category/' + category)
            cid = real_category.cid

        total = self.post_repo.category_count(cid)
        pages = self.post_repo.get_published_posts(page, perpage, cid)
        # NOTE(review): 'category/...' here vs '/category/...' above — the
        # leading slash differs; presumably one of the two is wrong.
        url = 'category/' + category if category else '/posts'
        pagination = Paginator(pages, total, page, perpage, url)
        return total, pagination
Example 52
Source File: post.py    From white with GNU General Public License v2.0 5 votes vote down vote up
def get_published_posts(self, page=1, perpage=10, category=None):
        """Return one page of published posts as loaded model objects.

        :param page: 1-based page number.
        :param perpage: rows per page.
        :param category: optional category id used as an equality condition.
        """
        # Select exactly the columns the post model is hydrated from.
        q = db.select(self.table).fields('title', 'slug', 'description', 'html', 'css', 'js',
                                         'category', 'status', 'allow_comment', 'author', 'updated', 'created', 'pid')
        if category:
            q.condition('category', category)
        # NOTE(review): limit/offset are chained before the 'status' condition;
        # the query builder presumably assembles the SQL regardless of call
        # order — confirm before reordering these calls.
        results = (q.limit(perpage).offset((page - 1) * perpage).condition('status', 'published')
                    .order_by('created', 'DESC').execute())
        return [self.load(data, self.model) for data in results]
Example 53
Source File: memory_db.py    From consuming_services_python_demos with MIT License 5 votes vote down vote up
def get_posts(ip_address):
        """Return the base posts plus any stored for *ip_address*, newest first."""
        MemoryDb.ensure_base_data()
        combined = list(MemoryDb.base_data)
        if ip_address in MemoryDb.data_lookup:
            combined.extend(MemoryDb.data_lookup[ip_address])

        # Sort ascending by publish date, then flip: newest first, with ties
        # ending up reversed exactly as the original sort+reverse produced.
        combined.sort(key=lambda post: post.published)
        combined.reverse()

        return combined
Example 54
Source File: zhihu.py    From zhihu-python with MIT License 5 votes vote down vote up
def get_posts_num(self):
        """Return the total number of posts, parsing the page metadata first
        if it has not been loaded yet.

        Fixes the ``self.meta == None`` comparison (identity check with
        ``is None`` is the correct idiom) and drops two redundant locals.
        """
        if self.meta is None:
            # Lazily populate self.meta from the page.
            self.parser()
        return int(self.meta['postsCount'])
Example 55
Source File: engine.py    From Flask-Blogging with MIT License 5 votes vote down vote up
def get_posts(self, count=10, offset=0, recent=True, tag=None,
                  user_id=None, include_draft=False, render=False):
        """Fetch posts from the storage backend and post-process each one.

        :param count: number of posts to fetch.
        :param offset: offset into the result set.
        :param recent: newest-first ordering when True.
        :param tag: restrict to posts with this tag.
        :param user_id: restrict to posts by this author.
        :param include_draft: include unpublished posts.
        :param render: whether to render each post (previously accepted but
            silently ignored — the call hard-coded ``render=False``).
        :returns: the processed list of posts (previously ``None``, since the
            function had no return statement).
        """
        posts = self.storage(count, offset, recent, tag, user_id,
                             include_draft)
        for post in posts:
            # Pass the caller's flag through instead of hard-coding False.
            self.process_post(post, render=render)
        return posts
Example 56
Source File: code.py    From facebook-birthday-response with MIT License 5 votes vote down vote up
def get_posts(url, wishes=None):
    """Recursively walk the Graph API feed pages starting at *url*,
    collecting birthday-wish status posts, and return them as a list.

    On the first call *wishes* is None; each recursive call passes the
    accumulated list along with the next-page URL.  Python 2 code
    (bare ``print`` statements).
    """
    #check if we are done
    if wishes is None:
        wishes = []
        stop = False
    else:
        # The next-page URL carries an 'until' unix timestamp; once paging
        # has moved past the birthday (module-level utc_bday) we are done.
        until = parse_qs(urlparse(url).query).get('until')
        stop = int(until[0]) < utc_bday

    if stop:
        return wishes
    else:
        print url
        req = requests.get(url, proxies=proxy_dict)
        if req.status_code == 200:
            
            content = req.json()
            
            #keep only relevant fields from post data
            feed = []
            for post in content['data']:
                feed.append({'id': post['id'],'from': post['from']['name'],'message': post.get('message', ''),'type': post['type']})

            #keep only posts relevant to birthday. Make sure you reply your friends who post happy birthday pictures on your timeline or posts in local language
            for post in feed:
                if post['type']=='status' and is_birthday(post['message'], use_filter) :
                    wishes.append(post)
            
            # NOTE(review): raises KeyError if 'paging'/'next' is absent on
            # the last page — presumably the 'until' stop check fires first;
            # confirm against the API's final-page shape.
            next_url = content['paging']['next']
            
            return get_posts(next_url, wishes)
        else:
            # NOTE(review): non-200 falls through after printing, implicitly
            # returning None — callers expecting a list should handle this.
            print "Unable to connect. Check if session is still valid" 
Example 57
Source File: libposts.py    From vulpy with MIT License 5 votes vote down vote up
def get_posts(username):
    """Return all posts by *username*, newest first, as a list of dicts.

    :param username: author whose posts are selected (parameterized query,
        safe against SQL injection).
    :returns: list of dicts, one per row of the ``posts`` table.

    Fixes a resource leak: the original never closed the sqlite connection.
    """
    conn = sqlite3.connect('db_posts.sqlite')
    conn.set_trace_callback(print)  # log executed SQL, as before
    conn.row_factory = sqlite3.Row
    try:
        rows = conn.execute(
            "SELECT * FROM posts WHERE username = ? ORDER BY date DESC",
            (username,),
        ).fetchall()
    finally:
        conn.close()

    # sqlite3.Row supports the mapping protocol, so dict(row) replaces the
    # manual zip(row.keys(), row) dance.
    return [dict(row) for row in rows]
Example 58
Source File: download_reddit_qalist.py    From ParlAI with MIT License 5 votes vote down vote up
def get_posts(post_ids):
    """Look up reddit submissions by id via pushshift and yield
    (subreddit_name, serialized_post) pairs."""
    posts = ','.join(post_ids)
    post_ids_link = f'https://api.pushshift.io/reddit/search/submission/?ids={posts}'
    response = requests.get(post_ids_link)
    payload = json.loads(response.text)
    # one pushshift page can support up to ~170 comments
    for submission in payload['data']:
        yield submission['subreddit'].lower(), json.dumps(submission)


# download a file, extract comments from desired ids
# additionally return subreddits corresponding to comments 
Example 59
Source File: reddit.py    From mee6 with MIT License 5 votes vote down vote up
async def get_posts(self, subreddit):
        """Get the most recent posts of a subreddit (at most two).

        Args:
            subreddit: Subreddit name

        Returns:
            A list of at most two post dicts; empty on any failure.

        Fixes: the body uses ``async with``/``await`` but the function was
        declared with a plain ``def``, which is a SyntaxError — it must be a
        coroutine.  ``ClientSession`` is also entered with ``async with``
        instead of the long-deprecated plain ``with``.  The old docstring
        documented an ``n`` parameter that does not exist; the result count
        is hard-coded to 2.
        """

        url = "https://www.reddit.com/r/{}/new.json".format(subreddit)
        posts = []

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    if resp.status == 200:
                        json = await resp.json()
                        posts = json['data']['children']
                        # unwrap each listing child to its 'data' payload
                        posts = list(map(lambda p: p['data'], posts))
        except Exception as e:
            log.info("Cannot get posts from {}".format(subreddit))
            log.info(e)
            return []

        return posts[:2]