# paperdoorknob: Print glowfic
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, version 3.


from argparse import ArgumentParser
import itertools
import os.path

from typing import Iterable

from bs4 import BeautifulSoup
from bs4.element import Tag
import requests
import requests_cache
from xdg_base_dirs import xdg_cache_home


def command_line_parser() -> ArgumentParser:
    """Build the command-line interface for paperdoorknob."""
    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
    parser.add_argument(
        '--cache_path',
        metavar='PATH',
        help='Where to keep the http cache (instead of %(default)s)',
        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
    parser.add_argument(
        '--timeout',
        help='How long to wait for HTTP requests, in seconds',
        type=int,
        default=30)
    parser.add_argument('url', help='URL to retrieve')
    return parser


def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
    """Fetch url via session and parse the response body as HTML."""
    with session.get(url, timeout=timeout) as r:
        r.raise_for_status()
        return BeautifulSoup(r.text, 'html.parser')


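# The selectors below assume Glowfic's post markup: each post carries an
# edit box ("post-edit-box") and a footer ("post-footer") that are page
# chrome rather than story text, so clean() drops them before printing.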
def clean(html: BeautifulSoup) -> BeautifulSoup:
    """Strip the edit boxes and footers from every post, in place."""
    for eb in html.find_all("div", class_="post-edit-box"):
        eb.decompose()
    for footer in html.find_all("div", class_="post-footer"):
        footer.decompose()
    return html


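# Assumed page structure: the root post of a thread lives in a single
# "post-post" div and each reply in its own "post-reply" div, so walking
# them in document order reproduces the thread.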
def replies(html: BeautifulSoup) -> Iterable[Tag]:
    """Yield the thread's root post followed by every reply."""
    def text() -> Tag:
        body = html.body
        assert body
        text = body.find_next("div", class_="post-post")
        assert isinstance(text, Tag)
        return text

    def the_replies() -> Iterable[Tag]:
        rs = html.find_all("div", class_="post-reply")
        assert all(isinstance(r, Tag) for r in rs)
        return rs

    return itertools.chain([text()], the_replies())


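# process() chains the steps above. A sketch of programmatic use, with a
# hypothetical post URL:
#
#     with requests.Session() as s:
#         for tag in process('https://glowfic.com/posts/1234', s, 30):
#             print(tag.get_text())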
def process(
        url: str,
        session: requests.Session,
        timeout: int) -> Iterable[Tag]:
    """Fetch and clean a thread, returning its posts in order."""
    html = clean(fetch(url, session, timeout))
    return replies(html)


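# main() wraps the pipeline in a requests_cache.CachedSession, so repeat
# runs against the same URL can be served from the on-disk cache;
# cache_control=True tells requests_cache to honour the server's
# Cache-Control headers when deciding whether a cached page is stale.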
def main() -> None:
    args = command_line_parser().parse_args()
    with requests_cache.CachedSession(args.cache_path, cache_control=True) as session:
        process(args.url, session, args.timeout)


if __name__ == '__main__':
    main()
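
# A hypothetical invocation (the post URL is made up):
#
#   python paperdoorknob.py --timeout 10 'https://glowfic.com/posts/1234'
#
# By default the HTTP cache lives under $XDG_CACHE_HOME/paperdoorknob
# (typically ~/.cache/paperdoorknob); override it with --cache_path.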