X-Git-Url: http://git.scottworley.com/paperdoorknob/blobdiff_plain/e138a9b49da14f16c1de2c02dd928fd5d16aed52..a64403ac570d0049c7b5a616d19fab37ab5cb4e8:/paperdoorknob.py

diff --git a/paperdoorknob.py b/paperdoorknob.py
index b4f5e52..f83c9f8 100644
--- a/paperdoorknob.py
+++ b/paperdoorknob.py
@@ -6,11 +6,32 @@
 
 
 from argparse import ArgumentParser
-import requests
+import itertools
+import os.path
+import subprocess
+
+from typing import IO, Iterable
+
+from bs4 import BeautifulSoup
+from bs4.element import Tag
+from xdg_base_dirs import xdg_cache_home
+
+from fetch import CachingFetcher, Fetcher
 
 
 def command_line_parser() -> ArgumentParser:
     parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
+    parser.add_argument(
+        '--cache_path',
+        metavar='PATH',
+        help='Where to keep the http cache (instead of %(default)s)',
+        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
+    parser.add_argument(
+        '--out',
+        help='The filename stem at which to write output ' +
+        '(eg: "%(default)s" produces %(default)s.tex, %(default)s.pdf, etc.)',
+        default='book')
+    parser.add_argument('--pandoc', help='Location of the pandoc executable')
     parser.add_argument(
         '--timeout',
         help='How long to wait for HTTP requests, in seconds',
@@ -19,15 +40,58 @@ def command_line_parser() -> ArgumentParser:
     return parser
 
 
-def fetch(url: str, session: requests.Session, timeout: int) -> None:
-    with session.get(url, timeout=timeout) as r:
-        r.raise_for_status()
+def fetch(url: str, fetcher: Fetcher) -> BeautifulSoup:
+    return BeautifulSoup(fetcher.fetch(url), 'html.parser')
+
+
+def clean(html: BeautifulSoup) -> BeautifulSoup:
+    for eb in html.find_all("div", class_="post-edit-box"):
+        eb.decompose()
+    for footer in html.find_all("div", class_="post-footer"):
+        footer.decompose()
+    return html
+
+
+def replies(html: BeautifulSoup) -> Iterable[Tag]:
+    def text() -> Tag:
+        body = html.body
+        assert body
+        text = body.find_next("div", class_="post-post")
+        assert isinstance(text, Tag)
+        return text
+
+    def the_replies() -> Iterable[Tag]:
+        rs = html.find_all("div", class_="post-reply")
+        assert all(isinstance(r, Tag) for r in rs)
+        return rs
+
+    return itertools.chain([text()], the_replies())
+
+
+def html_to_tex(pandoc: str, tag: Tag) -> bytes:
+    return subprocess.run([pandoc, '--from=html', '--to=latex'],
+                          input=tag.encode(),
+                          stdout=subprocess.PIPE,
+                          check=True).stdout
+
+
+def process(
+        url: str,
+        fetcher: Fetcher,
+        texout: IO[bytes],
+        pandoc: str) -> None:
+    texout.write(b'\\documentclass{article}\n\\begin{document}\n')
+    html = clean(fetch(url, fetcher))
+    for r in replies(html):
+        texout.write(html_to_tex(pandoc, r))
+    texout.write(b'\\end{document}\n')
 
 
 def main() -> None:
     args = command_line_parser().parse_args()
-    with requests.session() as session:
-        fetch(args.url, session, args.timeout)
+    with CachingFetcher(args.cache_path, args.timeout) as fetcher:
+        with open(args.out + '.tex', 'wb') as texout:
+            process(args.url, fetcher, texout, args.pandoc or 'pandoc')
 
 
 if __name__ == '__main__':
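
Note on the new dependency: the added code imports CachingFetcher and Fetcher
from a project-local fetch module whose source is not part of this diff.
Judging only from how they are used above -- fetcher.fetch(url) feeds
BeautifulSoup, and CachingFetcher(cache_path, timeout) is entered as a context
manager -- a minimal sketch of that interface might look like the following.
Everything here beyond the names Fetcher, CachingFetcher, and fetch() is an
assumption, not the real fetch.py:

    from abc import ABC, abstractmethod


    class Fetcher(ABC):
        """Anything that can turn a URL into raw page bytes."""

        @abstractmethod
        def fetch(self, url: str) -> bytes:
            ...


    class CachingFetcher(Fetcher):
        """Sketch only: the real class caches HTTP responses under cache_path."""

        def __init__(self, cache_path: str, timeout: int) -> None:
            self._cache_path = cache_path
            self._timeout = timeout

        def __enter__(self) -> 'CachingFetcher':
            return self

        def __exit__(self, *exc: object) -> None:
            # Hypothetical: a real implementation might close an HTTP session here.
            pass

        def fetch(self, url: str) -> bytes:
            # The real caching and HTTP logic lives in fetch.py.
            raise NotImplementedError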