Skip to content

Commit f9289da

Browse files
committed
Add method and tests for getting editorial articles from the texts category
1 parent fc9b763 commit f9289da

File tree

2 files changed

+384
-0
lines changed

2 files changed

+384
-0
lines changed

loading_api_wrapper/api.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,16 @@
55
API_URL = "https://api.loading.se"
66
API_VERSION = "v1"
77
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; rv:91.0) Gecko/20100101 Firefox/91.0"
8+
# Post types the editorial ("texts") endpoint accepts as a "post-type"
# header filter.  get_editorials() only forwards a post_type argument when
# it appears in this list; anything else falls back to the API default
# ("neRegular", the first entry).
EDITORIAL_POST_TYPES = [
    "neRegular",
    "review",
    "opinion",
    "update",
    "podcast",
    "conversation",
]

# Sort keys get_editorials() will forward in the "sort" header; unknown
# keys are silently dropped so the API's default ordering is used.
EDITORIAL_SORT = ["title"]
818

919

1020
class LoadingApiWrapper:
@@ -162,3 +172,34 @@ def get_other(self, page=None):
162172
thread_data = self._get_threads_in_forum_category(category_name, page)
163173

164174
return thread_data
175+
176+
def get_editorials(self, page=None, post_type=None, sort=None):
177+
url = f"{API_URL}/{API_VERSION}/posts/"
178+
headers = {
179+
"User-Agent": USER_AGENT,
180+
"texts": "texts",
181+
"post-type": "neRegular",
182+
}
183+
184+
if post_type and post_type in EDITORIAL_POST_TYPES:
185+
headers["post-type"] = post_type
186+
187+
if sort and sort in EDITORIAL_SORT:
188+
headers["sort"] = sort
189+
190+
# Chooses a specific page instead of the first page which is the default page.
191+
if page and page > 1:
192+
headers["page"] = str(page)
193+
194+
# Doing this checks to make sure it only return data from a page that exists.
195+
if page and page < 1:
196+
return {"code": 404, "post": {"posts": [], "users": []}}
197+
198+
response = requests.get(url, headers=headers)
199+
data = response.json()
200+
201+
# Page out of range.
202+
if not len(data["posts"]):
203+
return {"code": 404, "post": data}
204+
205+
return {"code": 200, "post": data}

0 commit comments

Comments
 (0)