# paperdoorknob: Print glowfic
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, version 3.


from argparse import ArgumentParser
import itertools
import os.path

from typing import IO, Iterable

from bs4 import BeautifulSoup
from bs4.element import Tag
from xdg_base_dirs import xdg_cache_home

from fetch import CachingFetcher, Fetcher
from texify import PandocTexifier, Texifier

def command_line_parser() -> ArgumentParser:
    """Build the command-line parser for the paperdoorknob CLI.

    Returns an ArgumentParser configured with the cache location, output
    stem, pandoc path, HTTP timeout, and the (positional) URL to print.
    """
    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
    parser.add_argument(
        '--cache_path',
        metavar='PATH',
        help='Where to keep the http cache (instead of %(default)s)',
        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
    parser.add_argument(
        '--out',
        help='The filename stem at which to write output ' +
        '(eg: "%(default)s" produces %(default)s.text, %(default)s.pdf, etc.)',
        default='book')
    parser.add_argument('--pandoc', help='Location of the pandoc executable')
    parser.add_argument(
        '--timeout',
        # Without an explicit type, a user-supplied value would arrive as a
        # str (the int default 30 is only used when the flag is absent), and
        # a string timeout breaks the HTTP fetcher.  Coerce to float.
        type=float,
        help='How long to wait for HTTP requests, in seconds',
        default=30)
    parser.add_argument('url', help='URL to retrieve')
    return parser
def parse(content: bytes) -> BeautifulSoup:
    """Build a BeautifulSoup tree from raw HTML bytes."""
    soup = BeautifulSoup(content, 'html.parser')
    return soup
def clean(html: BeautifulSoup) -> BeautifulSoup:
    """Remove chrome we don't want in print: edit boxes, then footers.

    Mutates *html* in place and returns it for convenient chaining.
    """
    for css_class in ("post-edit-box", "post-footer"):
        for chrome in html.find_all("div", class_=css_class):
            chrome.decompose()
    return html
def replies(html: BeautifulSoup) -> Iterable[Tag]:
    """Return the top post followed by every reply, in page order."""
    body = html.body
    assert body
    post = body.find_next("div", class_="post-post")
    assert isinstance(post, Tag)

    reply_divs = html.find_all("div", class_="post-reply")
    assert all(isinstance(r, Tag) for r in reply_divs)

    return itertools.chain([post], reply_divs)
def process(
        url: str,
        fetcher: Fetcher,
        texifier: Texifier,
        texout: IO[bytes]) -> None:
    """Fetch *url*, texify each post, and write a LaTeX document to *texout*."""
    preamble = b'\\documentclass{article}\n\\begin{document}\n'
    postamble = b'\\end{document}\n'
    texout.write(preamble)
    document = clean(parse(fetcher.fetch(url)))
    for chunk in replies(document):
        texout.write(texifier.texify(chunk))
    texout.write(postamble)
def main() -> None:
    """Entry point: parse arguments, fetch the glowfic, emit the .tex file."""
    args = command_line_parser().parse_args()
    texifier = PandocTexifier(args.pandoc or 'pandoc')
    with CachingFetcher(args.cache_path, args.timeout) as fetcher, \
            open(args.out + '.tex', 'wb') as texout:
        process(args.url, fetcher, texifier, texout)


if __name__ == '__main__':
    main()