]> git.scottworley.com Git - paperdoorknob/blame - paperdoorknob.py
Open output file
[paperdoorknob] / paperdoorknob.py
CommitLineData
92b11a10
SW
1# paperdoorknob: Print glowfic
2#
3# This program is free software: you can redistribute it and/or modify it
4# under the terms of the GNU General Public License as published by the
5# Free Software Foundation, version 3.
6
7
8from argparse import ArgumentParser
55958ec0 9import itertools
ba3b7c52 10import os.path
a0d30541 11
a2d42468 12from typing import IO, Iterable
a0d30541 13
136277e3 14from bs4 import BeautifulSoup
6409066b 15from bs4.element import Tag
b25a2f90 16import requests
b34a368f 17import requests_cache
ba3b7c52 18from xdg_base_dirs import xdg_cache_home
92b11a10
SW
19
20
def command_line_parser() -> ArgumentParser:
    """Build the argument parser for the paperdoorknob command line.

    Returns:
        An ArgumentParser accepting --cache_path, --out, --timeout, and a
        positional url.
    """
    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
    parser.add_argument(
        '--cache_path',
        metavar='PATH',
        help='Where to keep the http cache (instead of %(default)s)',
        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
    parser.add_argument(
        '--out',
        help='The filename stem at which to write output ' +
        '(eg: "%(default)s" produces %(default)s.text, %(default)s.pdf, etc.)',
        default='book')
    parser.add_argument(
        '--timeout',
        # Bug fix: without type=int, a value given on the command line stays
        # a str (only the default was an int), and requests' timeout handling
        # would then fail. Convert explicitly.
        type=int,
        help='How long to wait for HTTP requests, in seconds',
        default=30)
    parser.add_argument('url', help='URL to retrieve')
    return parser
39
40
def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
    """Retrieve *url* via *session* and parse the response body as HTML.

    Raises requests.HTTPError on a non-2xx response.
    """
    response = session.get(url, timeout=timeout)
    with response:
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')
b25a2f90
SW
45
46
47cfa3cd
SW
def clean(html: BeautifulSoup) -> BeautifulSoup:
    """Strip UI chrome (edit boxes, then footers) from a glowfic page, in place."""
    for css_class in ("post-edit-box", "post-footer"):
        for element in html.find_all("div", class_=css_class):
            element.decompose()
    return html
53
54
def replies(html: BeautifulSoup) -> Iterable[Tag]:
    """Return the thread's opening post followed by every reply.

    The original inner helpers ran eagerly when chain() was built, so this
    flattened form evaluates in the same order.
    """
    body = html.body
    assert body
    opening_post = body.find_next("div", class_="post-post")
    assert isinstance(opening_post, Tag)
    reply_divs = html.find_all("div", class_="post-reply")
    assert all(isinstance(r, Tag) for r in reply_divs)
    return itertools.chain([opening_post], reply_divs)
69
70
def process(
        url: str,
        session: requests.Session,
        timeout: int,
        texout: IO[str],
) -> None:
    """Fetch one glowfic thread, clean it, and write output to *texout*.

    Real typesetting is not implemented yet; only a placeholder is written.
    """
    document = clean(fetch(url, session, timeout))
    replies(document)
    # Placeholder output until actual TeX generation lands.
    print("soon", file=texout)
47cfa3cd
SW
80
81
def main() -> None:
    """Command-line entry point: parse arguments and process the thread."""
    args = command_line_parser().parse_args()
    # A single with-statement manages both the cached HTTP session and the
    # output file.
    with requests_cache.CachedSession(args.cache_path, cache_control=True) as session, \
            open(args.out + '.tex', 'w', encoding='UTF-8') as texout:
        process(args.url, session, args.timeout, texout)
92b11a10
SW
87
88
# Script entry point.
if __name__ == '__main__':
    main()