setup CI & CD #27

Merged · 5 commits · Mar 22, 2024
27 changes: 27 additions & 0 deletions .github/workflows/publish.yaml
@@ -0,0 +1,27 @@
name: Build distribution

on: [push, pull_request]

jobs:
test:
runs-on: "ubuntu-latest"

steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v3
with:
python-version: 3.8

- name: Install build dependencies
run: python -m pip install build wheel

- name: Build distributions
shell: bash -l {0}
run: python -m build

- name: Publish package to PyPI
if: github.repository == 'Linusp/python-inoreader' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
uses: pypa/gh-action-pypi-publish@master
with:
user: __token__
password: ${{ secrets.pypi_password }}
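A note on the workflow above: distributions are built on every push and pull request, but the publish step is gated; it runs only when a tag is pushed to Linusp/python-inoreader, authenticating against PyPI with an API token stored in the pypi_password secret. The build can be reproduced locally before tagging a release. A minimal sketch, assuming the build and twine packages are installed (CI itself installs only build and wheel; twine is an extra sanity check, not part of the workflow):

# Local dry run of the CI build step (a sketch; assumes
# `pip install build twine`; twine is an addition CI does not run).
import glob
import subprocess

subprocess.run(["python", "-m", "build"], check=True)  # sdist + wheel into dist/
subprocess.run(["twine", "check", *glob.glob("dist/*")], check=True)  # validate metadata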
26 changes: 5 additions & 21 deletions .pre-commit-config.yaml
@@ -26,28 +26,12 @@ repos:
^.pytest_cache/
)

- repo: https://github.com/PyCQA/isort
rev: 5.12.0
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.3
hooks:
- id: isort

- repo: https://github.com/psf/black
rev: 22.6.0
hooks:
- id: black

- repo: https://github.com/PyCQA/flake8
rev: 6.1.0
hooks:
- id: flake8
additional_dependencies:
- setuptools
- flake8-bugbear
- flake8-comprehensions
- flake8-debugger
- flake8-logging-format
- flake8-pytest-style
- flake8-tidy-imports
- id: ruff
args: [ --fix ]
- id: ruff-format

- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
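This hunk collapses the isort, black, and flake8 hooks into ruff: the ruff hook runs with --fix and covers linting plus import sorting, while ruff-format replaces black. The same checks can be run outside pre-commit; a sketch, assuming ruff is installed and using the package path this repo lints elsewhere:

# Mirror the new hooks from a script (a sketch; assumes `pip install ruff`).
import subprocess

subprocess.run(["ruff", "check", "--fix", "inoreader/"], check=True)  # lint + autofix
subprocess.run(["ruff", "format", "inoreader/"], check=True)  # formatter, replaces black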
20 changes: 16 additions & 4 deletions Makefile
@@ -1,5 +1,11 @@
lint: clean
flake8 inoreader --format=pylint
- pip install ruff codespell -q
- ruff check inoreader/
- codespell

format:
- pip install ruff -q
- ruff format inoreader/

clean:
- find . -iname "*__pycache__" | xargs rm -rf
@@ -12,6 +18,12 @@ clean:
venv:
- virtualenv --python=$(shell which python3) --prompt '<venv:inoreader>' venv

deps:
- pip install -U pip setuptools
- pip install -r requirements.txt
lock-requirements:
- pip install pip-tools -q
- pip-compile -o requirements.txt

deps: lock-requirements
- pip-sync

build: lint test
- python -m build
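The Makefile now splits dependency management in two: lock-requirements compiles a fully pinned requirements.txt with pip-tools, and deps runs pip-sync so the environment matches that lock file exactly, installing what is missing and removing what is extra. A sketch of the same flow driven from Python, assuming pip-tools is installed and the project declares its top-level dependencies somewhere pip-compile can read them (setup.py or pyproject.toml):

# Reproduce `make lock-requirements` and `make deps` (a sketch; assumes
# `pip install pip-tools` and declared top-level dependencies).
import subprocess

subprocess.run(["pip-compile", "-o", "requirements.txt"], check=True)  # pin the full tree
subprocess.run(["pip-sync", "requirements.txt"], check=True)  # make the env match the pins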
2 changes: 1 addition & 1 deletion inoreader/__init__.py
@@ -1,4 +1,4 @@
# coding: utf-8
from .client import InoreaderClient

__all__ = ['InoreaderClient']
__all__ = ["InoreaderClient"]
24 changes: 12 additions & 12 deletions inoreader/article.py
@@ -23,7 +23,7 @@ def __init__(
self.categories = categories
self.link = link
self.published = published
self.content = content.strip() if content else ''
self.content = content.strip() if content else ""
self.text = extract_text(self.content)
self.author = author
self.feed_id = feed_id
@@ -33,22 +33,22 @@ def __init__(
@classmethod
def from_json(cls, data):
article_data = {
'id': data['id'],
'title': data['title'],
'categories': data['categories'],
'published': data['published'],
'content': data.get('summary', {}).get('content'),
'author': data.get('author'),
"id": data["id"],
"title": data["title"],
"categories": data["categories"],
"published": data["published"],
"content": data.get("summary", {}).get("content"),
"author": data.get("author"),
}
links = [item['href'] for item in data['canonical']]
article_data['link'] = links[0] if links else ''
links = [item["href"] for item in data["canonical"]]
article_data["link"] = links[0] if links else ""

# feed info
article_data.update(
{
'feed_id': data['origin']['streamId'],
'feed_title': normalize_whitespace(data['origin']['title']),
'feed_link': data['origin']['htmlUrl'],
"feed_id": data["origin"]["streamId"],
"feed_title": normalize_whitespace(data["origin"]["title"]),
"feed_link": data["origin"]["htmlUrl"],
}
)

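For reference, from_json consumes Inoreader's item JSON: id, title, categories, published, summary.content, author, the first canonical link, and the origin feed's streamId, title, and htmlUrl. A sketch of the payload shape it expects (the class name Article and all field values are illustrative; the keys match what the method reads above):

# Minimal payload for from_json (a sketch; values are made up).
from inoreader.article import Article

data = {
    "id": "tag:google.com,2005:reader/item/0000000000000001",
    "title": "Example entry",
    "categories": ["user/-/label/news"],
    "published": 1711065600,
    "summary": {"content": "<p>Body</p>"},
    "author": "someone",
    "canonical": [{"href": "https://example.com/post"}],
    "origin": {
        "streamId": "feed/https://example.com/rss",
        "title": "Example Feed",
        "htmlUrl": "https://example.com/",
    },
}

article = Article.from_json(data)
print(article.title, article.link, article.feed_title)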
126 changes: 63 additions & 63 deletions inoreader/client.py
@@ -25,20 +25,20 @@

class InoreaderClient(object):
# paths
TOKEN_PATH = '/oauth2/token'
USER_INFO_PATH = 'user-info'
TAG_LIST_PATH = 'tag/list'
SUBSCRIPTION_LIST_PATH = 'subscription/list'
STREAM_CONTENTS_PATH = 'stream/contents/'
EDIT_TAG_PATH = 'edit-tag'
EDIT_SUBSCRIPTION_PATH = 'subscription/edit'
TOKEN_PATH = "/oauth2/token"
USER_INFO_PATH = "user-info"
TAG_LIST_PATH = "tag/list"
SUBSCRIPTION_LIST_PATH = "subscription/list"
STREAM_CONTENTS_PATH = "stream/contents/"
EDIT_TAG_PATH = "edit-tag"
EDIT_SUBSCRIPTION_PATH = "subscription/edit"

# tags
GENERAL_TAG_TEMPLATE = 'user/-/label/{}'
READ_TAG = 'user/-/state/com.google/read'
STARRED_TAG = 'user/-/state/com.google/starred'
LIKED_TAG = 'user/-/state/com.google/like'
BROADCAST_TAG = 'user/-/state/com.google/broadcast'
GENERAL_TAG_TEMPLATE = "user/-/label/{}"
READ_TAG = "user/-/state/com.google/read"
STARRED_TAG = "user/-/state/com.google/starred"
LIKED_TAG = "user/-/state/com.google/like"
BROADCAST_TAG = "user/-/state/com.google/broadcast"

def __init__(
self, app_id, app_key, access_token, refresh_token, expires_at, config_manager=None
@@ -51,9 +51,9 @@ def __init__(
self.session = requests.Session()
self.session.headers.update(
{
'AppId': self.app_id,
'AppKey': self.app_key,
'Authorization': 'Bearer {}'.format(self.access_token),
"AppId": self.app_id,
"AppKey": self.app_key,
"Authorization": "Bearer {}".format(self.access_token),
}
)
self.config_manager = config_manager
@@ -76,16 +76,16 @@ def parse_response(response, json_data=True):
def refresh_access_token(self):
url = urljoin(BASE_URL, self.TOKEN_PATH)
payload = {
'client_id': self.app_id,
'client_secret': self.app_key,
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
"client_id": self.app_id,
"client_secret": self.app_key,
"grant_type": "refresh_token",
"refresh_token": self.refresh_token,
}
response = self.parse_response(requests.post(url, json=payload, proxies=self.proxies))
self.access_token = response['access_token']
self.refresh_token = response['refresh_token']
self.expires_at = datetime.now().timestamp() + response['expires_in']
self.session.headers['Authorization'] = 'Bearer {}'.format(self.access_token)
self.access_token = response["access_token"]
self.refresh_token = response["refresh_token"]
self.expires_at = datetime.now().timestamp() + response["expires_in"]
self.session.headers["Authorization"] = "Bearer {}".format(self.access_token)

if self.config_manager:
self.config_manager.access_token = self.access_token
@@ -103,47 +103,47 @@ def get_folders(self):
self.check_token()

url = urljoin(BASE_URL, self.TAG_LIST_PATH)
params = {'types': 1, 'counts': 1}
params = {"types": 1, "counts": 1}
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))

folders = []
for item in response['tags']:
if item.get('type') != 'folder':
for item in response["tags"]:
if item.get("type") != "folder":
continue

folder_name = item['id'].split('/')[-1]
folders.append({'name': folder_name, 'unread_count': item['unread_count']})
folder_name = item["id"].split("/")[-1]
folders.append({"name": folder_name, "unread_count": item["unread_count"]})

folders.sort(key=itemgetter('name'))
folders.sort(key=itemgetter("name"))
return folders

def get_tags(self):
self.check_token()

url = urljoin(BASE_URL, self.TAG_LIST_PATH)
params = {'types': 1, 'counts': 1}
params = {"types": 1, "counts": 1}
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))

tags = []
for item in response['tags']:
if item.get('type') != 'tag':
for item in response["tags"]:
if item.get("type") != "tag":
continue

folder_name = item['id'].split('/')[-1]
tags.append({'name': folder_name, 'unread_count': item['unread_count']})
folder_name = item["id"].split("/")[-1]
tags.append({"name": folder_name, "unread_count": item["unread_count"]})

tags.sort(key=itemgetter('name'))
tags.sort(key=itemgetter("name"))
return tags

def get_subscription_list(self):
self.check_token()

url = urljoin(BASE_URL, self.SUBSCRIPTION_LIST_PATH)
response = self.parse_response(self.session.get(url, proxies=self.proxies))
for item in response['subscriptions']:
for item in response["subscriptions"]:
yield Subscription.from_json(item)

def get_stream_contents(self, stream_id, c='', limit=None):
def get_stream_contents(self, stream_id, c="", limit=None):
fetched_count = 0
stop = False
while not stop:
@@ -161,16 +161,16 @@ def get_stream_contents(self, stream_id, c='', limit=None):
if c is None:
break

def __get_stream_contents(self, stream_id, continuation=''):
def __get_stream_contents(self, stream_id, continuation=""):
self.check_token()

url = urljoin(BASE_URL, self.STREAM_CONTENTS_PATH + quote_plus(stream_id))
params = {'n': 50, 'r': '', 'c': continuation, 'output': 'json'} # default 20, max 1000
params = {"n": 50, "r": "", "c": continuation, "output": "json"} # default 20, max 1000
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))
if 'continuation' in response:
return response['items'], response['continuation']
if "continuation" in response:
return response["items"], response["continuation"]
else:
return response['items'], None
return response["items"], None

def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, limit=None):
self.check_token()
@@ -179,20 +179,20 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, limit=None):
if folder:
url = urljoin(url, quote_plus(self.GENERAL_TAG_TEMPLATE.format(folder)))

params = {'c': str(uuid4())}
params = {"c": str(uuid4())}
if unread:
params['xt'] = self.READ_TAG
params["xt"] = self.READ_TAG

if starred:
params['it'] = self.STARRED_TAG
params["it"] = self.STARRED_TAG

fetched_count = 0
response = self.parse_response(self.session.post(url, params=params, proxies=self.proxies))
for data in response['items']:
for data in response["items"]:
categories = {
category.split('/')[-1]
for category in data.get('categories', [])
if category.find('label') > 0
category.split("/")[-1]
for category in data.get("categories", [])
if category.find("label") > 0
}
if tags and not categories.issuperset(set(tags)):
continue
@@ -202,17 +202,17 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, limit=None):
if limit and fetched_count >= limit:
break

continuation = response.get('continuation')
continuation = response.get("continuation")
while continuation and (not limit or fetched_count < limit):
params['c'] = continuation
params["c"] = continuation
response = self.parse_response(
self.session.post(url, params=params, proxies=self.proxies)
)
for data in response['items']:
for data in response["items"]:
categories = {
category.split('/')[-1]
for category in data.get('categories', [])
if category.find('label') > 0
category.split("/")[-1]
for category in data.get("categories", [])
if category.find("label") > 0
}
if tags and not categories.issuperset(set(tags)):
continue
Expand All @@ -221,7 +221,7 @@ def fetch_articles(self, folder=None, tags=None, unread=True, starred=False, lim
if limit and fetched_count >= limit:
break

continuation = response.get('continuation')
continuation = response.get("continuation")

def fetch_unread(self, folder=None, tags=None, limit=None):
for article in self.fetch_articles(folder=folder, tags=tags, unread=True):
@@ -237,7 +237,7 @@ def add_general_label(self, articles, label):
url = urljoin(BASE_URL, self.EDIT_TAG_PATH)
for start in range(0, len(articles), 10):
end = min(start + 10, len(articles))
params = {'a': label, 'i': [articles[idx].id for idx in range(start, end)]}
params = {"a": label, "i": [articles[idx].id for idx in range(start, end)]}
self.parse_response(
self.session.post(url, params=params, proxies=self.proxies), json_data=False
)
@@ -248,7 +248,7 @@ def remove_general_label(self, articles, label):
url = urljoin(BASE_URL, self.EDIT_TAG_PATH)
for start in range(0, len(articles), 10):
end = min(start + 10, len(articles))
params = {'r': label, 'i': [articles[idx].id for idx in range(start, end)]}
params = {"r": label, "i": [articles[idx].id for idx in range(start, end)]}
self.parse_response(
self.session.post(url, params=params, proxies=self.proxies), json_data=False
)
@@ -285,16 +285,16 @@ def edit_subscription(self, stream_id, action, title=None, add_folder=None, remove_folder=None):
url = urljoin(BASE_URL, self.EDIT_SUBSCRIPTION_PATH)
# https://us.inoreader.com/developers/edit-subscription
# The documentation looks a bit outdated, `follow`/`unfollow` don't work
action = {'follow': 'subscribe', 'unfollow': 'unsubscribe'}.get(action) or action
params = {'ac': action, 's': stream_id}
action = {"follow": "subscribe", "unfollow": "unsubscribe"}.get(action) or action
params = {"ac": action, "s": stream_id}
if title:
params['t'] = title
params["t"] = title

if add_folder:
params['a'] = add_folder
params["a"] = add_folder

if remove_folder:
params['r'] = remove_folder
params["r"] = remove_folder

r = self.session.post(url, params=params, proxies=self.proxies)
response = self.parse_response(
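Putting the client changes together: check_token and refresh_access_token keep the OAuth session alive, stream fetching pages through continuation tokens, and fetch_articles filters by folder and tag labels client-side. A usage sketch (all credential values are placeholders; the constructor and fetch_unread signatures are taken from the diff above):

# End-to-end usage sketch; credentials here are placeholders.
from inoreader.client import InoreaderClient

client = InoreaderClient(
    app_id="your-app-id",
    app_key="your-app-key",
    access_token="initial-access-token",
    refresh_token="initial-refresh-token",
    expires_at=0,  # treated as expired, so the first call should refresh the token
)

# Up to 10 unread articles from one folder.
for article in client.fetch_unread(folder="news", limit=10):
    print(article.feed_title, "-", article.title)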