X-Git-Url: http://git.scottworley.com/paperdoorknob/blobdiff_plain/136277e30143cd1219c896bb4980027ac3c6dbe1..929db57622c6b8756ed341d7ae2862126a7c638f:/paperdoorknob.py

diff --git a/paperdoorknob.py b/paperdoorknob.py
index bb8bdd1..34223e4 100644
--- a/paperdoorknob.py
+++ b/paperdoorknob.py
@@ -5,39 +5,56 @@
 # Free Software Foundation, version 3.
 
 
-from argparse import ArgumentParser
-import os.path
+import itertools
+
+from typing import Iterable
+
 from bs4 import BeautifulSoup
-import requests
-import requests_cache
-from xdg_base_dirs import xdg_cache_home
+from bs4.element import Tag
+
+from args import spec_from_commandline_args
+from spec import Spec
+
+
+def parse(content: bytes) -> BeautifulSoup:
+    return BeautifulSoup(content, 'html.parser')
+
+
+def clean(html: BeautifulSoup) -> BeautifulSoup:
+    for eb in html.find_all("div", class_="post-edit-box"):
+        eb.decompose()
+    for footer in html.find_all("div", class_="post-footer"):
+        footer.decompose()
+    return html
+
+
+def replies(html: BeautifulSoup) -> Iterable[Tag]:
+    def text() -> Tag:
+        body = html.body
+        assert body
+        text = body.find_next("div", class_="post-post")
+        assert isinstance(text, Tag)
+        return text
+    def the_replies() -> Iterable[Tag]:
+        rs = html.find_all("div", class_="post-reply")
+        assert all(isinstance(r, Tag) for r in rs)
+        return rs
 
 
-def command_line_parser() -> ArgumentParser:
-    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
-    parser.add_argument(
-        '--cache_path',
-        metavar='PATH',
-        help='Where to keep the http cache (instead of %(default)s)',
-        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
-    parser.add_argument(
-        '--timeout',
-        help='How long to wait for HTTP requests, in seconds',
-        default=30)
-    parser.add_argument('url', help='URL to retrieve')
-    return parser
+    return itertools.chain([text()], the_replies())
 
 
-def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
-    with session.get(url, timeout=timeout) as r:
-        r.raise_for_status()
-        return BeautifulSoup(r.text, 'html.parser')
+def process(spec: Spec) -> None:
+    spec.texout.write(b'\\documentclass{article}\n\\begin{document}\n')
+    html = clean(parse(spec.htmlfilter(spec.fetcher.fetch(spec.url))))
+    for r in replies(html):
+        spec.texout.write(spec.texifier.texify(r))
+    spec.texout.write(b'\\end{document}\n')
 
 
 def main() -> None:
-    args = command_line_parser().parse_args()
-    with requests_cache.CachedSession(args.cache_path, cache_control=True) as session:
-        fetch(args.url, session, args.timeout)
+    with spec_from_commandline_args() as spec:
+        process(spec)
 
 
 if __name__ == '__main__':
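
Note: the new parse / clean / replies helpers introduced in this change can be exercised on their own, without the Spec plumbing that main() now uses. A minimal sketch follows, assuming the repository modules (args.py, spec.py) sit alongside paperdoorknob.py so the import succeeds; the sample HTML and the expected output are illustrative only, not taken from the repository.

# Illustrative only: feed a tiny stand-in for a fetched glowfic page
# through the parse -> clean -> replies pipeline from this change.
from paperdoorknob import parse, clean, replies

sample = b"""
<html><body>
  <div class="post-post">Once upon a time.
    <div class="post-edit-box">edited 3 times</div>
  </div>
  <div class="post-reply">And then.</div>
</body></html>
"""

html = clean(parse(sample))   # clean() strips the post-edit-box / post-footer divs
for r in replies(html):       # yields the post body first, then each reply
    print(r.get_text(strip=True))

# Expected output (roughly):
#   Once upon a time.
#   And then.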