Diffstat (limited to 'nikola/plugins/compile')
-rw-r--r--   nikola/plugins/compile/html.py                 11
-rw-r--r--   nikola/plugins/compile/ipynb/__init__.py        9
-rw-r--r--   nikola/plugins/compile/markdown/__init__.py     8
-rw-r--r--   nikola/plugins/compile/markdown/mdx_gist.py    87
-rw-r--r--   nikola/plugins/compile/pandoc.py                4
-rw-r--r--   nikola/plugins/compile/php.py                  25
-rw-r--r--   nikola/plugins/compile/rest/__init__.py        19
-rw-r--r--   nikola/plugins/compile/rest/gist.py             5
-rw-r--r--   nikola/plugins/compile/rest/listing.py          4
-rw-r--r--   nikola/plugins/compile/rest/post_list.py        9
10 files changed, 140 insertions, 41 deletions
diff --git a/nikola/plugins/compile/html.py b/nikola/plugins/compile/html.py
index fff7f89..24bf385 100644
--- a/nikola/plugins/compile/html.py
+++ b/nikola/plugins/compile/html.py
@@ -26,14 +26,15 @@
 
 """Implementation of compile_html for HTML source files."""
 
+from __future__ import unicode_literals
+
 import os
 import re
-import codecs
+import io
 
 from nikola.plugin_categories import PageCompiler
 from nikola.utils import makedirs, write_metadata
 
-
 _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
@@ -43,8 +44,8 @@ class CompileHtml(PageCompiler):
 
     def compile_html(self, source, dest, is_two_file=True):
         makedirs(os.path.dirname(dest))
-        with codecs.open(dest, "w+", "utf8") as out_file:
-            with codecs.open(source, "r", "utf8") as in_file:
+        with io.open(dest, "w+", encoding="utf8") as out_file:
+            with io.open(source, "r", encoding="utf8") as in_file:
                 data = in_file.read()
                 if not is_two_file:
                     data = re.split(_META_SEPARATOR, data, maxsplit=1)[-1]
@@ -62,7 +63,7 @@ class CompileHtml(PageCompiler):
         makedirs(os.path.dirname(path))
         if not content.endswith('\n'):
             content += '\n'
-        with codecs.open(path, "wb+", "utf8") as fd:
+        with io.open(path, "w+", encoding="utf8") as fd:
             if onefile:
                 fd.write('<!--\n')
                 fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/ipynb/__init__.py b/nikola/plugins/compile/ipynb/__init__.py
index f4d554c..7dde279 100644
--- a/nikola/plugins/compile/ipynb/__init__.py
+++ b/nikola/plugins/compile/ipynb/__init__.py
@@ -27,7 +27,7 @@
 """Implementation of compile_html based on nbconvert."""
 
 from __future__ import unicode_literals, print_function
-import codecs
+import io
 import os
 
 try:
@@ -47,6 +47,7 @@ class CompileIPynb(PageCompiler):
 
     name = "ipynb"
     supports_onefile = False
+    demote_headers = True
 
     def compile_html(self, source, dest, is_two_file=True):
         if flag is None:
@@ -55,8 +56,8 @@ class CompileIPynb(PageCompiler):
         HTMLExporter.default_template = 'basic'
         c = Config(self.site.config['IPYNB_CONFIG'])
         exportHtml = HTMLExporter(config=c)
-        with codecs.open(dest, "w+", "utf8") as out_file:
-            with codecs.open(source, "r", "utf8") as in_file:
+        with io.open(dest, "w+", encoding="utf8") as out_file:
+            with io.open(source, "r", encoding="utf8") as in_file:
                 nb = in_file.read()
                 nb_json = nbformat.reads_json(nb)
             (body, resources) = exportHtml.from_notebook_node(nb_json)
@@ -71,7 +72,7 @@ class CompileIPynb(PageCompiler):
         makedirs(os.path.dirname(path))
         if onefile:
             raise Exception('The one-file format is not supported by this compiler.')
-        with codecs.open(path, "wb+", "utf8") as fd:
+        with io.open(path, "w+", encoding="utf8") as fd:
             fd.write("""{
 "metadata": {
 "name": ""
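The change that runs through html.py and ipynb/__init__.py (and the rest of the files below) is the move from codecs.open to io.open, with the encoding passed as a keyword argument and text mode ("w+") used instead of "wb+". A minimal sketch of the pattern on its own, with made-up file names:

    import io

    # io.open is the built-in open() on Python 3 and provides the same
    # unicode-aware file object on Python 2, so one code path serves both.
    with io.open('dest.html', 'w+', encoding='utf8') as out_file:
        with io.open('source.html', 'r', encoding='utf8') as in_file:
            out_file.write(in_file.read())
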
diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py
index 4182626..47c7c9b 100644
--- a/nikola/plugins/compile/markdown/__init__.py
+++ b/nikola/plugins/compile/markdown/__init__.py
@@ -28,7 +28,7 @@
 
 from __future__ import unicode_literals
 
-import codecs
+import io
 import os
 import re
 
@@ -70,8 +70,8 @@ class CompileMarkdown(PageCompiler):
             req_missing(['markdown'], 'build this site (compile Markdown)')
         makedirs(os.path.dirname(dest))
         self.extensions += self.site.config.get("MARKDOWN_EXTENSIONS")
-        with codecs.open(dest, "w+", "utf8") as out_file:
-            with codecs.open(source, "r", "utf8") as in_file:
+        with io.open(dest, "w+", encoding="utf8") as out_file:
+            with io.open(source, "r", encoding="utf8") as in_file:
                 data = in_file.read()
                 if not is_two_file:
                     data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
@@ -90,7 +90,7 @@ class CompileMarkdown(PageCompiler):
         makedirs(os.path.dirname(path))
         if not content.endswith('\n'):
             content += '\n'
-        with codecs.open(path, "wb+", "utf8") as fd:
+        with io.open(path, "w+", encoding="utf8") as fd:
             if onefile:
                 fd.write('<!-- \n')
                 fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/markdown/mdx_gist.py b/nikola/plugins/compile/markdown/mdx_gist.py
index 247478b..4209bdd 100644
--- a/nikola/plugins/compile/markdown/mdx_gist.py
+++ b/nikola/plugins/compile/markdown/mdx_gist.py
@@ -65,6 +65,42 @@ Example with filename:
     </div>
     </p>
 
+Basic Example with hexidecimal id:
+
+    >>> import markdown
+    >>> text = """
+    ... Text of the gist:
+    ... [:gist: c4a43d6fdce612284ac0]
+    ... """
+    >>> html = markdown.markdown(text, [GistExtension()])
+    >>> print(html)
+    <p>Text of the gist:
+    <div class="gist">
+    <script src="https://gist.github.com/c4a43d6fdce612284ac0.js"></script>
+    <noscript>
+    <pre>Moo</pre>
+    </noscript>
+    </div>
+    </p>
+
+Example with hexidecimal id filename:
+
+    >>> import markdown
+    >>> text = """
+    ... Text of the gist:
+    ... [:gist: c4a43d6fdce612284ac0 cow.txt]
+    ... """
+    >>> html = markdown.markdown(text, [GistExtension()])
+    >>> print(html)
+    <p>Text of the gist:
+    <div class="gist">
+    <script src="https://gist.github.com/c4a43d6fdce612284ac0.js?file=cow.txt"></script>
+    <noscript>
+    <pre>Moo</pre>
+    </noscript>
+    </div>
+    </p>
+
 Example using reStructuredText syntax:
 
     >>> import markdown
@@ -83,6 +119,42 @@ Example using reStructuredText syntax:
     </div>
     </p>
 
+Example using hexidecimal ID with reStructuredText syntax:
+
+    >>> import markdown
+    >>> text = """
+    ... Text of the gist:
+    ... .. gist:: c4a43d6fdce612284ac0
+    ... """
+    >>> html = markdown.markdown(text, [GistExtension()])
+    >>> print(html)
+    <p>Text of the gist:
+    <div class="gist">
+    <script src="https://gist.github.com/c4a43d6fdce612284ac0.js"></script>
+    <noscript>
+    <pre>Moo</pre>
+    </noscript>
+    </div>
+    </p>
+
+Example using hexidecimal ID and filename with reStructuredText syntax:
+
+    >>> import markdown
+    >>> text = """
+    ... Text of the gist:
+    ... .. gist:: c4a43d6fdce612284ac0 cow.txt
+    ... """
+    >>> html = markdown.markdown(text, [GistExtension()])
+    >>> print(html)
+    <p>Text of the gist:
+    <div class="gist">
+    <script src="https://gist.github.com/c4a43d6fdce612284ac0.js?file=cow.txt"></script>
+    <noscript>
+    <pre>Moo</pre>
+    </noscript>
+    </div>
+    </p>
+
 Error Case: non-existent Gist ID:
 
     >>> import markdown
@@ -95,7 +167,8 @@ Error Case: non-existent Gist ID:
     <p>Text of the gist:
     <div class="gist">
     <script src="https://gist.github.com/0.js"></script>
-    <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/0 --></noscript>
+    <noscript><!-- WARNING: Received a 404 response from Gist URL: \
+https://gist.githubusercontent.com/raw/0 --></noscript>
     </div>
     </p>
 
@@ -111,7 +184,8 @@ Error Case: non-existent file:
     <p>Text of the gist:
     <div class="gist">
     <script src="https://gist.github.com/4747847.js?file=doesntexist.py"></script>
-    <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/4747847/doesntexist.py --></noscript>
+    <noscript><!-- WARNING: Received a 404 response from Gist URL: \
+https://gist.githubusercontent.com/raw/4747847/doesntexist.py --></noscript>
     </div>
     </p>
 
@@ -140,11 +214,11 @@ except ImportError:
 
 GIST_JS_URL = "https://gist.github.com/{0}.js"
 GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}"
-GIST_RAW_URL = "https://gist.github.com/raw/{0}"
-GIST_FILE_RAW_URL = "https://gist.github.com/raw/{0}/{1}"
+GIST_RAW_URL = "https://gist.githubusercontent.com/raw/{0}"
+GIST_FILE_RAW_URL = "https://gist.githubusercontent.com/raw/{0}/{1}"
 
-GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+?))?\s*\]'
-GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+))\s*$'
+GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'
+GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>[^\]\s]+)(?:\s*(?P<filename>.+?))?\s*$'
 
 
 class GistFetchException(Exception):
@@ -244,6 +318,5 @@ def makeExtension(configs=None):
 
 if __name__ == '__main__':
     import doctest
-    # Silence user warnings thrown by tests:
     doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE + doctest.REPORT_NDIFF))
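The loosened patterns above are what allow hexadecimal gist IDs alongside the old purely numeric ones. A quick standalone check of GIST_MD_RE with re, outside the Markdown extension machinery (the sample strings are taken from the new doctests):

    import re

    GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'

    m = re.search(GIST_MD_RE, '[:gist: 4747847]')
    print(m.group('gist_id'), m.group('filename'))
    # -> 4747847 None

    m = re.search(GIST_MD_RE, '[:gist: c4a43d6fdce612284ac0 cow.txt]')
    print(m.group('gist_id'), m.group('filename'))
    # -> c4a43d6fdce612284ac0 cow.txt
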
""" + >>> html = markdown.markdown(text, [GistExtension()]) + >>> print(html) + <p>Text of the gist: + <div class="gist"> + <script src="https://gist.github.com/c4a43d6fdce612284ac0.js?file=cow.txt"></script> + <noscript> + <pre>Moo</pre> + </noscript> + </div> + </p> + Error Case: non-existent Gist ID: >>> import markdown @@ -95,7 +167,8 @@ Error Case: non-existent Gist ID: <p>Text of the gist: <div class="gist"> <script src="https://gist.github.com/0.js"></script> - <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/0 --></noscript> + <noscript><!-- WARNING: Received a 404 response from Gist URL: \ +https://gist.githubusercontent.com/raw/0 --></noscript> </div> </p> @@ -111,7 +184,8 @@ Error Case: non-existent file: <p>Text of the gist: <div class="gist"> <script src="https://gist.github.com/4747847.js?file=doesntexist.py"></script> - <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/4747847/doesntexist.py --></noscript> + <noscript><!-- WARNING: Received a 404 response from Gist URL: \ +https://gist.githubusercontent.com/raw/4747847/doesntexist.py --></noscript> </div> </p> @@ -140,11 +214,11 @@ except ImportError: GIST_JS_URL = "https://gist.github.com/{0}.js" GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}" -GIST_RAW_URL = "https://gist.github.com/raw/{0}" -GIST_FILE_RAW_URL = "https://gist.github.com/raw/{0}/{1}" +GIST_RAW_URL = "https://gist.githubusercontent.com/raw/{0}" +GIST_FILE_RAW_URL = "https://gist.githubusercontent.com/raw/{0}/{1}" -GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+?))?\s*\]' -GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+))\s*$' +GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]' +GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>[^\]\s]+)(?:\s*(?P<filename>.+?))?\s*$' class GistFetchException(Exception): @@ -244,6 +318,5 @@ def makeExtension(configs=None): if __name__ == '__main__': import doctest - # Silence user warnings thrown by tests: doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE + doctest.REPORT_NDIFF)) diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py index 6aa737e..ada8035 100644 --- a/nikola/plugins/compile/pandoc.py +++ b/nikola/plugins/compile/pandoc.py @@ -30,7 +30,7 @@ You will need, of course, to install pandoc """ -import codecs +import io import os import subprocess @@ -62,7 +62,7 @@ class CompilePandoc(PageCompiler): makedirs(os.path.dirname(path)) if not content.endswith('\n'): content += '\n' - with codecs.open(path, "wb+", "utf8") as fd: + with io.open(path, "w+", encoding="utf8") as fd: if onefile: fd.write('<!--\n') fd.write(write_metadata(metadata)) diff --git a/nikola/plugins/compile/php.py b/nikola/plugins/compile/php.py index 601f098..77344fb 100644 --- a/nikola/plugins/compile/php.py +++ b/nikola/plugins/compile/php.py @@ -29,11 +29,11 @@ from __future__ import unicode_literals import os -import shutil -import codecs +import io from nikola.plugin_categories import PageCompiler from nikola.utils import makedirs, write_metadata +from hashlib import md5 class CompilePhp(PageCompiler): @@ -43,7 +43,11 @@ class CompilePhp(PageCompiler): def compile_html(self, source, dest, is_two_file=True): makedirs(os.path.dirname(dest)) - shutil.copyfile(source, dest) + with io.open(dest, "w+", encoding="utf8") as out_file: + with open(source, "rb") as in_file: + hash = md5(in_file.read()).hexdigest() + out_file.write('<!-- 
diff --git a/nikola/plugins/compile/rest/__init__.py b/nikola/plugins/compile/rest/__init__.py
index a93199c..98c7151 100644
--- a/nikola/plugins/compile/rest/__init__.py
+++ b/nikola/plugins/compile/rest/__init__.py
@@ -25,7 +25,7 @@
 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 from __future__ import unicode_literals
-import codecs
+import io
 import os
 import re
 
@@ -58,8 +58,8 @@ class CompileRest(PageCompiler):
             req_missing(['docutils'], 'build this site (compile reStructuredText)')
         makedirs(os.path.dirname(dest))
         error_level = 100
-        with codecs.open(dest, "w+", "utf8") as out_file:
-            with codecs.open(source, "r", "utf8") as in_file:
+        with io.open(dest, "w+", encoding="utf8") as out_file:
+            with io.open(source, "r", encoding="utf8") as in_file:
                 data = in_file.read()
                 add_ln = 0
                 if not is_two_file:
@@ -83,11 +83,11 @@ class CompileRest(PageCompiler):
                 'syntax_highlight': 'short',
                 'math_output': 'mathjax',
                 'template': default_template_path,
-            }, logger=self.logger, l_source=source, l_add_ln=add_ln)
+            }, logger=self.logger, source_path=source, l_add_ln=add_ln)
             out_file.write(output)
             deps_path = dest + '.dep'
             if deps.list:
-                with codecs.open(deps_path, "wb+", "utf8") as deps_file:
+                with io.open(deps_path, "w+", encoding="utf8") as deps_file:
                     deps_file.write('\n'.join(deps.list))
             else:
                 if os.path.isfile(deps_path):
@@ -108,7 +108,7 @@ class CompileRest(PageCompiler):
         makedirs(os.path.dirname(path))
         if not content.endswith('\n'):
             content += '\n'
-        with codecs.open(path, "wb+", "utf8") as fd:
+        with io.open(path, "w+", encoding="utf8") as fd:
             if onefile:
                 fd.write(write_metadata(metadata))
             fd.write('\n' + content)
@@ -213,7 +213,7 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
              parser=None, parser_name='restructuredtext', writer=None,
              writer_name='html', settings=None, settings_spec=None,
              settings_overrides=None, config_section=None,
-             enable_exit_status=None, logger=None, l_source='', l_add_ln=0):
+             enable_exit_status=None, logger=None, l_add_ln=0):
     """
     Set up & run a `Publisher`, and return a dictionary of document parts.
    Dictionary keys are the names of parts, and values are Unicode strings;
@@ -237,7 +237,7 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
     # logger      a logger from Nikola
     # source      source filename (docutils gets a string)
     # add_ln      amount of metadata lines (see comment in compile_html above)
-    reader.l_settings = {'logger': logger, 'source': l_source,
+    reader.l_settings = {'logger': logger, 'source': source_path,
                          'add_ln': l_add_ln}
 
     pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
@@ -246,7 +246,8 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
     pub.set_components(None, parser_name, writer_name)
     pub.process_programmatic_settings(
         settings_spec, settings_overrides, config_section)
-    pub.set_source(source, source_path)
+    pub.set_source(source, None)
+    pub.settings._nikola_source_path = source_path
     pub.set_destination(None, destination_path)
     pub.publish(enable_exit_status=enable_exit_status)
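rst2html now passes None as the path to pub.set_source and parks the real filename on the docutils settings object as _nikola_source_path. Any component that receives those settings can read the path back; a hypothetical transform sketch (only the attribute name comes from the hunk above):

    from docutils.transforms import Transform

    class ReportSourcePath(Transform):
        default_priority = 900

        def apply(self):
            # Attribute set in rst2html(); absent when running outside Nikola.
            path = getattr(self.document.settings, '_nikola_source_path', None)
            if path:
                self.document.reporter.info('compiled from {0}'.format(path))
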
diff --git a/nikola/plugins/compile/rest/gist.py b/nikola/plugins/compile/rest/gist.py
index e09ed76..65189b5 100644
--- a/nikola/plugins/compile/rest/gist.py
+++ b/nikola/plugins/compile/rest/gist.py
@@ -49,7 +49,10 @@ class GitHubGist(Directive):
 
     def get_raw_gist(self, gistID):
         url = "https://gist.github.com/raw/{0}".format(gistID)
-        return requests.get(url).text
+        try:
+            return requests.get(url).text
+        except requests.exceptions.RequestException:
+            raise self.error('Cannot get gist for url={0}'.format(url))
 
     def run(self):
         if 'https://' in self.arguments[0]:
diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py
index 18a1807..23ec254 100644
--- a/nikola/plugins/compile/rest/listing.py
+++ b/nikola/plugins/compile/rest/listing.py
@@ -29,7 +29,7 @@
 
 from __future__ import unicode_literals
 
-from codecs import open as codecs_open  # for patching purposes
+import io
 import os
 try:
     from urlparse import urlunsplit
@@ -111,7 +111,7 @@ class Listing(Include):
             self.options['code'] = lang
             if 'linenos' in self.options:
                 self.options['number-lines'] = self.options['linenos']
-            with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
+            with io.open(fpath, 'r+', encoding='utf8') as fileobject:
                 self.content = fileobject.read().splitlines()
             self.state.document.settings.record_dependencies.add(fpath)
             target = urlunsplit(("link", 'listing', fname, '', ''))
diff --git a/nikola/plugins/compile/rest/post_list.py b/nikola/plugins/compile/rest/post_list.py
index 456e571..f719e31 100644
--- a/nikola/plugins/compile/rest/post_list.py
+++ b/nikola/plugins/compile/rest/post_list.py
@@ -129,6 +129,7 @@ class PostList(Directive):
         else:
             post_list_id = self.options.get('id', 'post_list_' + uuid.uuid4().hex)
 
+        filtered_timeline = []
         posts = []
         step = -1 if reverse is None else None
         if show_all is None:
@@ -136,16 +137,20 @@ class PostList(Directive):
         else:
             timeline = [p for p in self.site.timeline if p.use_in_feeds]
 
-        for post in timeline[start:stop:step]:
+        for post in timeline:
             if tags:
                 cont = True
+                tags_lower = [t.lower() for t in post.tags]
                 for tag in tags:
-                    if tag in [t.lower() for t in post.tags]:
+                    if tag in tags_lower:
                         cont = False
 
                 if cont:
                     continue
 
+            filtered_timeline.append(post)
+
+        for post in filtered_timeline[start:stop:step]:
             if slugs:
                 cont = True
                 for slug in slugs:
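The post_list change splits the work into two passes: the first loop applies the tag filter and collects matches into filtered_timeline, and only then is the start:stop:step slice applied, so the requested number of posts is taken from the filtered set rather than from the raw timeline. The same idea reduced to plain data (names and values are illustrative only):

    timeline = [
        {'title': 'a', 'tags': ['python']},
        {'title': 'b', 'tags': ['php']},
        {'title': 'c', 'tags': ['python', 'rest']},
    ]
    tags = ['python']
    start, stop, step = None, 1, None

    # First pass: keep every post carrying at least one requested tag.
    filtered_timeline = []
    for post in timeline:
        tags_lower = [t.lower() for t in post['tags']]
        if any(tag in tags_lower for tag in tags):
            filtered_timeline.append(post)

    # Second pass: slice the filtered list, not the full timeline.
    for post in filtered_timeline[start:stop:step]:
        print(post['title'])  # prints 'a' only
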
