]> git.scottworley.com Git - paperdoorknob/blob - paperdoorknob.py
a31b659d2d403c67e0a44d4e32ebfd218168cc1d
[paperdoorknob] / paperdoorknob.py
1 # paperdoorknob: Print glowfic
2 #
3 # This program is free software: you can redistribute it and/or modify it
4 # under the terms of the GNU General Public License as published by the
5 # Free Software Foundation, version 3.
6
7
8 from argparse import ArgumentParser
9 import itertools
10 import os.path
11
12 from typing import IO, Iterable
13
14 from bs4 import BeautifulSoup
15 from bs4.element import Tag
16 import requests
17 import requests_cache
18 from xdg_base_dirs import xdg_cache_home
19
20
def command_line_parser() -> ArgumentParser:
    """Build the command-line argument parser for paperdoorknob.

    Returns an ArgumentParser accepting --cache_path, --out, --pandoc,
    --timeout, and a positional url.
    """
    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
    parser.add_argument(
        '--cache_path',
        metavar='PATH',
        help='Where to keep the http cache (instead of %(default)s)',
        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
    parser.add_argument(
        '--out',
        help='The filename stem at which to write output ' +
        '(eg: "%(default)s" produces %(default)s.text, %(default)s.pdf, etc.)',
        default='book')
    parser.add_argument('--pandoc', help='Location of the pandoc executable')
    parser.add_argument(
        '--timeout',
        # Without type=int, a user-supplied --timeout arrives as a str
        # (the default 30 is only used when the flag is absent), and the
        # str would then be handed to requests as the timeout value.
        type=int,
        help='How long to wait for HTTP requests, in seconds',
        default=30)
    parser.add_argument('url', help='URL to retrieve')
    return parser
40
41
def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
    """Download ``url`` through ``session`` and parse the body as HTML.

    Raises requests.HTTPError on a non-2xx response.
    """
    with session.get(url, timeout=timeout) as response:
        response.raise_for_status()
        page = response.text
    return BeautifulSoup(page, 'html.parser')
46
47
def clean(html: BeautifulSoup) -> BeautifulSoup:
    """Remove edit boxes and per-post footers from the page, in place.

    Returns the same BeautifulSoup object for chaining.
    """
    for unwanted_class in ("post-edit-box", "post-footer"):
        for element in html.find_all("div", class_=unwanted_class):
            element.decompose()
    return html
54
55
def replies(html: BeautifulSoup) -> Iterable[Tag]:
    """Yield the thread's original post followed by each reply, as Tags."""
    def original_post() -> Tag:
        body = html.body
        assert body
        post = body.find_next("div", class_="post-post")
        assert isinstance(post, Tag)
        return post

    def reply_posts() -> Iterable[Tag]:
        found = html.find_all("div", class_="post-reply")
        assert all(isinstance(node, Tag) for node in found)
        return found

    return itertools.chain([original_post()], reply_posts())
70
71
def process(
        url: str,
        session: requests.Session,
        timeout: int,
        texout: IO[str],
) -> None:
    """Fetch one glowfic thread, strip chrome, and emit (placeholder) TeX.

    Output generation is not implemented yet; only "soon" is written.
    """
    page = fetch(url, session, timeout)
    replies(clean(page))
    texout.write("soon\n")
81
82
def main() -> None:
    """Entry point: parse arguments and write <out>.tex for the given URL."""
    args = command_line_parser().parse_args()
    tex_path = args.out + '.tex'
    with requests_cache.CachedSession(args.cache_path, cache_control=True) as session, \
            open(tex_path, 'w', encoding='UTF-8') as texout:
        process(args.url, session, args.timeout, texout)
88
89
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()