]> git.scottworley.com Git - paperdoorknob/blob - paperdoorknob.py
Open output file
[paperdoorknob] / paperdoorknob.py
1 # paperdoorknob: Print glowfic
2 #
3 # This program is free software: you can redistribute it and/or modify it
4 # under the terms of the GNU General Public License as published by the
5 # Free Software Foundation, version 3.
6
7
8 from argparse import ArgumentParser
9 import itertools
10 import os.path
11
12 from typing import IO, Iterable
13
14 from bs4 import BeautifulSoup
15 from bs4.element import Tag
16 import requests
17 import requests_cache
18 from xdg_base_dirs import xdg_cache_home
19
20
def command_line_parser() -> ArgumentParser:
    """Build the argument parser for the paperdoorknob command-line interface.

    Options: --cache_path (HTTP cache directory), --out (output filename
    stem), --timeout (HTTP timeout in seconds), and the positional url.
    """
    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
    parser.add_argument(
        '--cache_path',
        metavar='PATH',
        help='Where to keep the http cache (instead of %(default)s)',
        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
    parser.add_argument(
        '--out',
        help='The filename stem at which to write output ' +
        '(eg: "%(default)s" produces %(default)s.text, %(default)s.pdf, etc.)',
        default='book')
    parser.add_argument(
        '--timeout',
        # type=int is required: without it a user-supplied value stays a str
        # (only the default is an int) and requests rejects a string timeout.
        type=int,
        help='How long to wait for HTTP requests, in seconds',
        default=30)
    parser.add_argument('url', help='URL to retrieve')
    return parser
39
40
def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
    """Retrieve `url` through `session` and parse the body as HTML.

    Raises requests.HTTPError on a non-success status code.
    """
    with session.get(url, timeout=timeout) as response:
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')
45
46
def clean(html: BeautifulSoup) -> BeautifulSoup:
    """Remove edit boxes, then footers, from the document in place.

    Returns the same (mutated) BeautifulSoup object for chaining.
    """
    for css_class in ("post-edit-box", "post-footer"):
        for unwanted in html.find_all("div", class_=css_class):
            unwanted.decompose()
    return html
53
54
def replies(html: BeautifulSoup) -> Iterable[Tag]:
    """Return the top post followed by every reply, in document order.

    The lookups run eagerly (as do their asserts); only the chaining of the
    two sequences is lazy.
    """
    body = html.body
    assert body
    top_post = body.find_next("div", class_="post-post")
    assert isinstance(top_post, Tag)

    reply_divs = html.find_all("div", class_="post-reply")
    assert all(isinstance(d, Tag) for d in reply_divs)

    return itertools.chain([top_post], reply_divs)
69
70
def process(
        url: str,
        session: requests.Session,
        timeout: int,
        texout: IO[str],
) -> None:
    """Fetch one glowfic thread, clean it, and write output to `texout`.

    Output generation is still a stub; only a placeholder line is written.
    """
    document = fetch(url, session, timeout)
    replies(clean(document))
    print("soon", file=texout)
80
81
def main() -> None:
    """Entry point: parse arguments, open the cache and output, and process."""
    args = command_line_parser().parse_args()
    session = requests_cache.CachedSession(args.cache_path, cache_control=True)
    with session, open(args.out + '.tex', 'w', encoding='UTF-8') as texout:
        process(args.url, session, args.timeout, texout)
87
88
# Run as a script (not when imported as a module).
if __name__ == '__main__':
    main()