Render links as footnotes
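
The generated LaTeX preamble now defines \href locally, so that a link's text
stays inline and its target is emitted as a footnote. A minimal sketch of the
effect (the example link is illustrative and not taken from any glowfic):

    \documentclass{article}
    % Same definition as the one added to the generated preamble below.
    \newcommand{\href}[2]{#2\footnote{#1}}
    \begin{document}
    See \href{https://example.com/}{the example page} for details.
    % Typesets as "See the example page for details." with a footnote
    % containing https://example.com/
    \end{document}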
diff --git a/paperdoorknob.py b/paperdoorknob.py
index fb2df1f1cc1c7443bc6e07018bad7148eb12a5c6..6a556753479b96f55c535f53a14d6c444822edf0 100644
--- a/paperdoorknob.py
+++ b/paperdoorknob.py
@@ -4,70 +4,55 @@
 # under the terms of the GNU General Public License as published by the
 # Free Software Foundation, version 3.
 
-
-from argparse import ArgumentParser
-import itertools
-import os.path
-
-from typing import Iterable
+from typing import Any, Iterable
 
 from bs4 import BeautifulSoup
-from bs4.element import Tag
-import requests
-import requests_cache
-from xdg_base_dirs import xdg_cache_home
-
-
-class Post:
-    def __init__(self, html: BeautifulSoup) -> None:
-        self._html = html
-        for eb in self._html.find_all("div", class_="post-edit-box"):
-            eb.decompose()
-        for footer in self._html.find_all("div", class_="post-footer"):
-            footer.decompose()
-
-    def text(self) -> Tag:
-        body = self._html.body
-        assert body
-        text = body.find_next("div", class_="post-post")
-        assert isinstance(text, Tag)
-        return text
-
-    def replies(self) -> Iterable[Tag]:
-        replies = self._html.find_all("div", class_="post-reply")
-        assert all(isinstance(r, Tag) for r in replies)
-        return replies
-
-    def entries(self) -> Iterable[Tag]:
-        return itertools.chain([self.text()], self.replies())
-
-
-def command_line_parser() -> ArgumentParser:
-    parser = ArgumentParser(prog='paperdoorknob', description='Print glowfic')
-    parser.add_argument(
-        '--cache_path',
-        metavar='PATH',
-        help='Where to keep the http cache (instead of %(default)s)',
-        default=os.path.join(xdg_cache_home(), "paperdoorknob"))
-    parser.add_argument(
-        '--timeout',
-        help='How long to wait for HTTP requests, in seconds',
-        default=30)
-    parser.add_argument('url', help='URL to retrieve')
-    return parser
-
 
-def fetch(url: str, session: requests.Session, timeout: int) -> BeautifulSoup:
-    with session.get(url, timeout=timeout) as r:
-        r.raise_for_status()
-        return BeautifulSoup(r.text, 'html.parser')
+from args import spec_from_commandline_args
+from glowfic import chunkDOMs, flatURL, makeChunk
+from spec import Spec
+
+
+def parse(content: bytes) -> BeautifulSoup:
+    return BeautifulSoup(content, 'html.parser')
+
+
+def ilen(it: Iterable[Any]) -> int:
+    return sum(1 for _ in it)
+
+
+def process(spec: Spec) -> None:
+    spec.texout.write(b'''\\documentclass{article}
+\\usepackage{booktabs}
+\\usepackage{graphicx}
+\\usepackage{longtable}
+\\usepackage{soul}
+\\usepackage{varwidth}
+\\usepackage{wrapstuff}
+''')
+    if spec.geometry is not None:
+        spec.texout.write(b'\\usepackage[' +
+                          spec.geometry.encode('UTF-8') +
+                          b']{geometry}\n')
+    spec.texout.write(b'''\\begin{document}
+\\newcommand{\\href}[2]{#2\\footnote{#1}}
+''')
+    url = flatURL(spec.url)
+    html = parse(spec.htmlfilter(spec.fetcher.fetch(url)))
+    num_chunks = ilen(chunkDOMs(html))
+    for i, r in enumerate(chunkDOMs(html)):
+        percent = 100.0 * i / num_chunks
+        spec.log(f'Processing chunk {i} of {num_chunks} ({percent:.1f}%)\r')
+        spec.domfilter(r)
+        chunk = makeChunk(r, spec.images)
+        spec.texout.write(spec.texfilter(spec.layout.renderChunk(chunk)))
+    spec.log('')
+    spec.texout.write(b'\\end{document}\n')
 
 
 def main() -> None:
-    args = command_line_parser().parse_args()
-    with requests_cache.CachedSession(args.cache_path, cache_control=True) as session:
-        html = fetch(args.url, session, args.timeout)
-        Post(html)
+    with spec_from_commandline_args() as spec:
+        process(spec)
 
 
 if __name__ == '__main__':
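
For reference, the document skeleton that process() writes to spec.texout,
sketched from the code above with the per-chunk bodies and the optional
geometry package left out:

    \documentclass{article}
    \usepackage{booktabs}
    \usepackage{graphicx}
    \usepackage{longtable}
    \usepackage{soul}
    \usepackage{varwidth}
    \usepackage{wrapstuff}
    \begin{document}
    \newcommand{\href}[2]{#2\footnote{#1}}
    % ... one rendered chunk per post/reply, from spec.layout.renderChunk ...
    \end{document}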