Diffstat (limited to 'nikola/plugins')
-rw-r--r--  nikola/plugins/basic_import.py               |   8
-rw-r--r--  nikola/plugins/command/auto.py               |   9
-rw-r--r--  nikola/plugins/command/bootswatch_theme.py   |   4
-rw-r--r--  nikola/plugins/command/check.py              |  38
-rw-r--r--  nikola/plugins/command/deploy.py             |  16
-rw-r--r--  nikola/plugins/command/github_deploy.py      |   3
-rw-r--r--  nikola/plugins/command/import_wordpress.py   |  50
-rw-r--r--  nikola/plugins/command/init.py               |   4
-rw-r--r--  nikola/plugins/command/install_theme.py      |   7
-rw-r--r--  nikola/plugins/command/new_post.py           |   8
-rw-r--r--  nikola/plugins/command/plugin.py             |  11
-rw-r--r--  nikola/plugins/command/serve.py              |  13
-rw-r--r--  nikola/plugins/compile/html.py               |  11
-rw-r--r--  nikola/plugins/compile/ipynb/__init__.py     |   9
-rw-r--r--  nikola/plugins/compile/markdown/__init__.py  |   8
-rw-r--r--  nikola/plugins/compile/markdown/mdx_gist.py  |  87
-rw-r--r--  nikola/plugins/compile/pandoc.py             |   4
-rw-r--r--  nikola/plugins/compile/php.py                |  25
-rw-r--r--  nikola/plugins/compile/rest/__init__.py      |  19
-rw-r--r--  nikola/plugins/compile/rest/gist.py          |   5
-rw-r--r--  nikola/plugins/compile/rest/listing.py       |   4
-rw-r--r--  nikola/plugins/compile/rest/post_list.py     |   9
-rw-r--r--  nikola/plugins/task/bundles.py               |  10
-rw-r--r--  nikola/plugins/task/copy_assets.py           |   8
-rw-r--r--  nikola/plugins/task/copy_files.py            |   2
-rw-r--r--  nikola/plugins/task/galleries.py             |  16
-rw-r--r--  nikola/plugins/task/indexes.py               |  35
-rw-r--r--  nikola/plugins/task/listings.py              |   5
-rw-r--r--  nikola/plugins/task/redirect.py              |   5
-rw-r--r--  nikola/plugins/task/robots.py                |  10
-rw-r--r--  nikola/plugins/task/sitemap/__init__.py      |  11
-rw-r--r--  nikola/plugins/task/sources.py               |   8
-rw-r--r--  nikola/plugins/task/tags.py                  |  11
-rw-r--r--  nikola/plugins/template/jinja.py             |   2
34 files changed, 315 insertions, 160 deletions
diff --git a/nikola/plugins/basic_import.py b/nikola/plugins/basic_import.py
index 7b23f9c..764968a 100644
--- a/nikola/plugins/basic_import.py
+++ b/nikola/plugins/basic_import.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
-import codecs
+import io
import csv
import datetime
import os
@@ -128,7 +128,7 @@ class ImportMixin(object):
description = ""
utils.makedirs(os.path.dirname(filename))
- with codecs.open(filename, "w+", "utf8") as fd:
+ with io.open(filename, "w+", encoding="utf8") as fd:
fd.write('{0}\n'.format(title))
fd.write('{0}\n'.format(slug))
fd.write('{0}\n'.format(post_date))
@@ -139,7 +139,7 @@ class ImportMixin(object):
@staticmethod
def write_urlmap_csv(output_file, url_map):
utils.makedirs(os.path.dirname(output_file))
- with codecs.open(output_file, 'w+', 'utf8') as fd:
+ with io.open(output_file, 'w+', encoding='utf8') as fd:
csv_writer = csv.writer(fd)
for item in url_map.items():
csv_writer.writerow(item)
@@ -159,7 +159,7 @@ class ImportMixin(object):
@staticmethod
def write_configuration(filename, rendered_template):
utils.makedirs(os.path.dirname(filename))
- with codecs.open(filename, 'w+', 'utf8') as fd:
+ with io.open(filename, 'w+', encoding='utf8') as fd:
fd.write(rendered_template)
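
Note: the codecs.open to io.open switch above repeats across most files in this changeset. A minimal sketch of the pattern, with a made-up metadata.txt file name (standard library only, same behaviour on Python 2.7 and 3):

    import io

    # io.open takes an explicit encoding keyword and hands back unicode text
    # on both Python 2 and 3, which is what the plugins rely on.
    with io.open("metadata.txt", "w+", encoding="utf8") as fd:
        fd.write(u"My first post\n")

    with io.open("metadata.txt", "r", encoding="utf8") as fd:
        title = fd.read().strip()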
diff --git a/nikola/plugins/command/auto.py b/nikola/plugins/command/auto.py
index c46e0a3..7f3f66f 100644
--- a/nikola/plugins/command/auto.py
+++ b/nikola/plugins/command/auto.py
@@ -28,7 +28,6 @@ from __future__ import print_function, unicode_literals
import os
import subprocess
-import webbrowser
from nikola.plugin_categories import Command
from nikola.utils import req_missing
@@ -61,7 +60,7 @@ class CommandAuto(Command):
try:
from livereload import Server
except ImportError:
- req_missing(['livereload==2.1.0'], 'use the "auto" command')
+ req_missing(['livereload'], 'use the "auto" command')
return
# Run an initial build so we are up-to-date
@@ -81,6 +80,8 @@ class CommandAuto(Command):
out_folder = self.site.config['OUTPUT_FOLDER']
if options and options.get('browser'):
- webbrowser.open('http://localhost:{0}'.format(port))
+ browser = True
+ else:
+ browser = False
- server.serve(port, None, out_folder)
+ server.serve(port, None, out_folder, True, browser)
diff --git a/nikola/plugins/command/bootswatch_theme.py b/nikola/plugins/command/bootswatch_theme.py
index 871a5ce..e65413b 100644
--- a/nikola/plugins/command/bootswatch_theme.py
+++ b/nikola/plugins/command/bootswatch_theme.py
@@ -82,9 +82,9 @@ class CommandBootswatchTheme(Command):
# See if we need bootswatch for bootstrap v2 or v3
themes = utils.get_theme_chain(parent)
- if 'bootstrap3' not in themes:
+ if 'bootstrap3' not in themes or 'bootstrap3-jinja' not in themes:
version = '2'
- elif 'bootstrap' not in themes:
+ elif 'bootstrap' not in themes or 'bootstrap-jinja' not in themes:
LOGGER.warn('"bootswatch_theme" only makes sense for themes that use bootstrap')
elif 'bootstrap3-gradients' in themes or 'bootstrap3-gradients-jinja' in themes:
LOGGER.warn('"bootswatch_theme" doesn\'t work well with the bootstrap3-gradients family')
diff --git a/nikola/plugins/command/check.py b/nikola/plugins/command/check.py
index 76571a0..bd254f4 100644
--- a/nikola/plugins/command/check.py
+++ b/nikola/plugins/command/check.py
@@ -30,9 +30,9 @@ import re
import sys
try:
from urllib import unquote
- from urlparse import urlparse
+ from urlparse import urlparse, urljoin, urldefrag
except ImportError:
- from urllib.parse import unquote, urlparse # NOQA
+ from urllib.parse import unquote, urlparse, urljoin, urldefrag # NOQA
import lxml.html
@@ -63,6 +63,15 @@ def real_scan_files(site):
return (only_on_output, only_on_input)
+def fs_relpath_from_url_path(url_path):
+ """Expects as input an urlparse(s).path"""
+ url_path = unquote(url_path)
+ # in windows relative paths don't begin with os.sep
+ if sys.platform == 'win32' and len(url_path):
+ url_path = url_path[1:].replace('/', '\\')
+ return url_path
+
+
class CommandCheck(Command):
"""Check the generated site."""
@@ -142,6 +151,8 @@ class CommandCheck(Command):
self.existing_targets.add(self.site.config['SITE_URL'])
self.existing_targets.add(self.site.config['BASE_URL'])
url_type = self.site.config['URL_TYPE']
+ if url_type == 'absolute':
+ url_netloc_to_root = urlparse(self.site.config['SITE_URL']).path
try:
filename = task.split(":")[-1]
d = lxml.html.fromstring(open(filename).read())
@@ -149,6 +160,7 @@ class CommandCheck(Command):
target = l[0].attrib[l[1]]
if target == "#":
continue
+ target, _ = urldefrag(target)
parsed = urlparse(target)
# Absolute links when using only paths, skip.
@@ -159,24 +171,20 @@ class CommandCheck(Command):
if (parsed.scheme or target.startswith('//')) and parsed.netloc != base_url.netloc:
continue
- if parsed.fragment:
- target = target.split('#')[0]
if url_type == 'rel_path':
target_filename = os.path.abspath(
os.path.join(os.path.dirname(filename), unquote(target)))
elif url_type in ('full_path', 'absolute'):
- target_filename = os.path.abspath(
- os.path.join(os.path.dirname(filename), parsed.path))
- if parsed.path in ['', '/']:
- target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], self.site.config['INDEX_FILE'])
- elif parsed.path.endswith('/'): # abspath removes trailing slashes
- target_filename += '/{0}'.format(self.site.config['INDEX_FILE'])
- if target_filename.startswith(base_url.path):
- target_filename = target_filename[len(base_url.path):]
- target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], target_filename)
- if parsed.path in ['', '/']:
- target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], self.site.config['INDEX_FILE'])
+ if url_type == 'absolute':
+ # convert to 'full_path' case, ie url relative to root
+ url_rel_path = target.path[len(url_netloc_to_root):]
+ else:
+ url_rel_path = target.path
+ if url_rel_path == '' or url_rel_path.endswith('/'):
+ url_rel_path = urljoin(url_rel_path, self.site.config['INDEX_FILE'])
+ fs_rel_path = fs_relpath_from_url_path(url_rel_path)
+ target_filename = os.path.join(self.site.config['OUTPUT_FOLDER'], fs_rel_path)
if any(re.match(x, target_filename) for x in self.whitelist):
continue
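
The reworked link check above leans on two stdlib helpers; a rough, self-contained illustration (index.html stands in for the configured INDEX_FILE, and Python 3 module names are used):

    import os
    from urllib.parse import urldefrag, urljoin  # provided by urlparse on Python 2

    # Fragments never change which file a link points at, so drop them first.
    target, fragment = urldefrag('/blog/2014/#comments')  # ('/blog/2014/', 'comments')

    # Directory-style URLs ('' or a trailing '/') resolve to the index file.
    if target == '' or target.endswith('/'):
        target = urljoin(target, 'index.html')            # '/blog/2014/index.html'

    # The plugin maps this onto OUTPUT_FOLDER via fs_relpath_from_url_path();
    # the lstrip() here just keeps the sketch self-contained.
    print(os.path.join('output', target.lstrip('/')))     # output/blog/2014/index.html on POSIX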
diff --git a/nikola/plugins/command/deploy.py b/nikola/plugins/command/deploy.py
index 1bec1d3..fde43fa 100644
--- a/nikola/plugins/command/deploy.py
+++ b/nikola/plugins/command/deploy.py
@@ -25,8 +25,9 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
-import codecs
+import io
from datetime import datetime
+from dateutil.tz import gettz
import os
import sys
import subprocess
@@ -35,7 +36,7 @@ import time
from blinker import signal
from nikola.plugin_categories import Command
-from nikola.utils import remove_file, get_logger
+from nikola.utils import get_logger, remove_file, unicode_str
class CommandDeploy(Command):
@@ -84,7 +85,7 @@ class CommandDeploy(Command):
self.logger.info("Successful deployment")
try:
- with codecs.open(timestamp_path, 'rb', 'utf8') as inf:
+ with io.open(timestamp_path, 'r', encoding='utf8') as inf:
last_deploy = datetime.strptime(inf.read().strip(), "%Y-%m-%dT%H:%M:%S.%f")
clean = False
except (IOError, Exception) as e:
@@ -96,8 +97,8 @@ class CommandDeploy(Command):
self._emit_deploy_event(last_deploy, new_deploy, clean, undeployed_posts)
# Store timestamp of successful deployment
- with codecs.open(timestamp_path, 'wb+', 'utf8') as outf:
- outf.write(new_deploy.isoformat())
+ with io.open(timestamp_path, 'w+', encoding='utf8') as outf:
+ outf.write(unicode_str(new_deploy.isoformat()))
def _emit_deploy_event(self, last_deploy, new_deploy, clean=False, undeployed=None):
""" Emit events for all timeline entries newer than last deploy.
@@ -120,9 +121,12 @@ class CommandDeploy(Command):
'undeployed': undeployed
}
+ if last_deploy.tzinfo is None:
+ last_deploy = last_deploy.replace(tzinfo=gettz('UTC'))
+
deployed = [
entry for entry in self.site.timeline
- if entry.date > last_deploy.replace(tzinfo=self.site.tzinfo) and entry not in undeployed
+ if entry.date > last_deploy and entry not in undeployed
]
event['deployed'] = deployed
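
The tzinfo guard above exists because Python refuses to compare naive and timezone-aware datetimes. A small sketch of the comparison, with invented timestamps (requires python-dateutil, which deploy.py now imports):

    from datetime import datetime
    from dateutil.tz import gettz

    # The timestamp file stores a naive datetime; timeline entries carry tzinfo.
    last_deploy = datetime.strptime('2014-05-01T12:00:00.000000',
                                    '%Y-%m-%dT%H:%M:%S.%f')
    if last_deploy.tzinfo is None:
        last_deploy = last_deploy.replace(tzinfo=gettz('UTC'))

    post_date = datetime(2014, 6, 1, tzinfo=gettz('UTC'))
    print(post_date > last_deploy)  # True: published after the last deployment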
diff --git a/nikola/plugins/command/github_deploy.py b/nikola/plugins/command/github_deploy.py
index d4dd8c5..13da48c 100644
--- a/nikola/plugins/command/github_deploy.py
+++ b/nikola/plugins/command/github_deploy.py
@@ -135,9 +135,10 @@ class CommandGitHubDeploy(Command):
)
commands = [
+ ['git', 'pull', remote, '%s:%s' % (deploy, deploy)],
['git', 'add', '-A'],
['git', 'commit', '-m', commit_message],
- ['git', 'push', '-f', remote, '%s:%s' % (deploy, deploy)],
+ ['git', 'push', remote, '%s:%s' % (deploy, deploy)],
['git', 'checkout', source],
]
diff --git a/nikola/plugins/command/import_wordpress.py b/nikola/plugins/command/import_wordpress.py
index 8ddc8c7..1af4083 100644
--- a/nikola/plugins/command/import_wordpress.py
+++ b/nikola/plugins/command/import_wordpress.py
@@ -158,6 +158,7 @@ class CommandImportWordpress(Command, ImportMixin):
channel = self.get_channel_from_file(self.wordpress_export_file)
self.context = self.populate_context(channel)
+ self.base_dir = urlparse(self.context['BASE_URL']).path
conf_template = self.generate_base_site()
# If user has specified a custom pattern for translation files we
@@ -323,13 +324,15 @@ class CommandImportWordpress(Command, ImportMixin):
# your blogging into another site or system its not.
# Why don't they just use JSON?
if sys.version_info[0] == 2:
- metadata = phpserialize.loads(utils.sys_encode(meta_value.text))
- size_key = 'sizes'
- file_key = 'file'
+ try:
+ metadata = phpserialize.loads(utils.sys_encode(meta_value.text))
+ except ValueError:
+ # local encoding might be wrong sometimes
+ metadata = phpserialize.loads(meta_value.text.encode('utf-8'))
else:
- metadata = phpserialize.loads(meta_value.text.encode('UTF-8'))
- size_key = b'sizes'
- file_key = b'file'
+ metadata = phpserialize.loads(meta_value.text.encode('utf-8'))
+ size_key = b'sizes'
+ file_key = b'file'
if size_key not in metadata:
continue
@@ -385,26 +388,34 @@ class CommandImportWordpress(Command, ImportMixin):
# link is something like http://foo.com/2012/09/01/hello-world/
# So, take the path, utils.slugify it, and that's our slug
link = get_text_tag(item, 'link', None)
- path = unquote(urlparse(link).path.strip('/'))
+ parsed = urlparse(link)
+ path = unquote(parsed.path.strip('/'))
# In python 2, path is a str. slug requires a unicode
# object. According to wikipedia, unquoted strings will
# usually be UTF8
if isinstance(path, utils.bytes_str):
path = path.decode('utf8')
+
+ # Cut out the base directory.
+ if path.startswith(self.base_dir.strip('/')):
+ path = path.replace(self.base_dir.strip('/'), '', 1)
+
pathlist = path.split('/')
- if len(pathlist) > 1:
- out_folder = os.path.join(*([out_folder] + pathlist[:-1]))
- slug = utils.slugify(pathlist[-1])
- if not slug: # it happens if the post has no "nice" URL
+ if parsed.query: # if there are no nice URLs and query strings are used
+ out_folder = os.path.join(*([out_folder] + pathlist))
slug = get_text_tag(
item, '{{{0}}}post_name'.format(wordpress_namespace), None)
- if not slug: # it *may* happen
- slug = get_text_tag(
- item, '{{{0}}}post_id'.format(wordpress_namespace), None)
- if not slug: # should never happen
- LOGGER.error("Error converting post:", title)
- return
+ if not slug: # it *may* happen
+ slug = get_text_tag(
+ item, '{{{0}}}post_id'.format(wordpress_namespace), None)
+ if not slug: # should never happen
+ LOGGER.error("Error converting post:", title)
+ return
+ else:
+ if len(pathlist) > 1:
+ out_folder = os.path.join(*([out_folder] + pathlist[:-1]))
+ slug = utils.slugify(pathlist[-1])
description = get_text_tag(item, 'description', '')
post_date = get_text_tag(
@@ -440,8 +451,9 @@ class CommandImportWordpress(Command, ImportMixin):
LOGGER.notice('Draft "{0}" will not be imported.'.format(title))
elif content.strip():
# If no content is found, no files are written.
- self.url_map[link] = (self.context['SITE_URL'] + out_folder + '/'
- + slug + '.html')
+ self.url_map[link] = (self.context['SITE_URL'] +
+ out_folder.rstrip('/') + '/' + slug +
+ '.html').replace(os.sep, '/')
if hasattr(self, "separate_qtranslate_content") \
and self.separate_qtranslate_content:
content_translations = separate_qtranslate_content(content)
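
The try/except around phpserialize.loads covers exports whose bytes are not in the local encoding. A sketch of that fallback on the Python 2 path; the helper name and its sys_encode argument are stand-ins, not the importer's real interface:

    import phpserialize  # third-party dependency of the WordPress importer

    def load_attachment_meta(text, sys_encode):
        # Decode WordPress' PHP-serialized attachment metadata.
        try:
            return phpserialize.loads(sys_encode(text))
        except ValueError:
            # The locale encoding is sometimes wrong; WordPress exports are UTF-8.
            return phpserialize.loads(text.encode('utf-8'))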
diff --git a/nikola/plugins/command/init.py b/nikola/plugins/command/init.py
index 8fb15e0..a8b60db 100644
--- a/nikola/plugins/command/init.py
+++ b/nikola/plugins/command/init.py
@@ -27,7 +27,7 @@
from __future__ import print_function, unicode_literals
import os
import shutil
-import codecs
+import io
import json
import textwrap
import datetime
@@ -242,7 +242,7 @@ class CommandInit(Command):
template_path = resource_filename('nikola', 'conf.py.in')
conf_template = Template(filename=template_path)
conf_path = os.path.join(target, 'conf.py')
- with codecs.open(conf_path, 'w+', 'utf8') as fd:
+ with io.open(conf_path, 'w+', encoding='utf8') as fd:
fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))
@classmethod
diff --git a/nikola/plugins/command/install_theme.py b/nikola/plugins/command/install_theme.py
index 859bd56..5397772 100644
--- a/nikola/plugins/command/install_theme.py
+++ b/nikola/plugins/command/install_theme.py
@@ -26,10 +26,9 @@
from __future__ import print_function
import os
-import codecs
+import io
import json
import shutil
-from io import BytesIO
import pygments
from pygments.lexers import PythonLexer
@@ -137,7 +136,7 @@ class CommandInstallTheme(Command):
if name in data:
utils.makedirs(self.output_dir)
LOGGER.info('Downloading: ' + data[name])
- zip_file = BytesIO()
+ zip_file = io.BytesIO()
zip_file.write(requests.get(data[name]).content)
LOGGER.info('Extracting: {0} into themes'.format(name))
utils.extract_all(zip_file)
@@ -161,7 +160,7 @@ class CommandInstallTheme(Command):
if os.path.exists(confpypath):
LOGGER.notice('This theme has a sample config file. Integrate it with yours in order to make this theme work!')
print('Contents of the conf.py.sample file:\n')
- with codecs.open(confpypath, 'rb', 'utf-8') as fh:
+ with io.open(confpypath, 'r', encoding='utf-8') as fh:
if self.site.colorful:
print(indent(pygments.highlight(
fh.read(), PythonLexer(), TerminalFormatter()),
diff --git a/nikola/plugins/command/new_post.py b/nikola/plugins/command/new_post.py
index 42f77cc..24c09d0 100644
--- a/nikola/plugins/command/new_post.py
+++ b/nikola/plugins/command/new_post.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
-import codecs
+import io
import datetime
import os
import sys
@@ -332,7 +332,7 @@ class CommandNewPost(Command):
event = dict(path=txt_path)
if not onefile: # write metadata file
- with codecs.open(meta_path, "wb+", "utf8") as fd:
+ with io.open(meta_path, "w+", encoding="utf8") as fd:
fd.write(utils.write_metadata(data))
LOGGER.info("Your {0}'s metadata is at: {1}".format(content_type, meta_path))
event['meta_path'] = meta_path
@@ -341,8 +341,8 @@ class CommandNewPost(Command):
signal('new_' + content_type).send(self, **event)
if options['edit']:
- editor = os.getenv('EDITOR')
- to_run = [editor, txt_path]
+ editor = os.getenv('EDITOR', '').split()
+ to_run = editor + [txt_path]
if not onefile:
to_run.append(meta_path)
if editor:
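
Splitting $EDITOR means values that carry flags, such as "emacs -nw", still launch correctly. A quick sketch with a hypothetical post path:

    import os
    import subprocess

    # os.getenv('EDITOR') may contain arguments; splitting keeps them separate
    # from the file name being opened. An unset EDITOR yields an empty list.
    editor = os.getenv('EDITOR', '').split()
    to_run = editor + ['posts/new-post.rst']
    if editor:
        subprocess.call(to_run)
    else:
        print('Set $EDITOR to have the new post opened automatically.')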
diff --git a/nikola/plugins/command/plugin.py b/nikola/plugins/command/plugin.py
index df0e7a4..71901b8 100644
--- a/nikola/plugins/command/plugin.py
+++ b/nikola/plugins/command/plugin.py
@@ -25,8 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
-import codecs
-from io import BytesIO
+import io
import os
import shutil
import subprocess
@@ -228,7 +227,7 @@ class CommandPlugin(Command):
if name in data:
utils.makedirs(self.output_dir)
LOGGER.info('Downloading: ' + data[name])
- zip_file = BytesIO()
+ zip_file = io.BytesIO()
zip_file.write(requests.get(data[name]).content)
LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))
utils.extract_all(zip_file, self.output_dir)
@@ -258,7 +257,7 @@ class CommandPlugin(Command):
except subprocess.CalledProcessError:
LOGGER.error('Could not install the dependencies.')
print('Contents of the requirements.txt file:\n')
- with codecs.open(reqpath, 'rb', 'utf-8') as fh:
+ with io.open(reqpath, 'r', encoding='utf-8') as fh:
print(indent(fh.read(), 4 * ' '))
print('You have to install those yourself or through a '
'package manager.')
@@ -270,7 +269,7 @@ class CommandPlugin(Command):
'dependencies you need to install '
'manually.')
print('Contents of the requirements-nonpy.txt file:\n')
- with codecs.open(reqnpypath, 'rb', 'utf-8') as fh:
+ with io.open(reqnpypath, 'r', encoding='utf-8') as fh:
for l in fh.readlines():
i, j = l.split('::')
print(indent(i.strip(), 4 * ' '))
@@ -283,7 +282,7 @@ class CommandPlugin(Command):
if os.path.exists(confpypath):
LOGGER.notice('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')
print('Contents of the conf.py.sample file:\n')
- with codecs.open(confpypath, 'rb', 'utf-8') as fh:
+ with io.open(confpypath, 'r', encoding='utf-8') as fh:
if self.site.colorful:
print(indent(pygments.highlight(
fh.read(), PythonLexer(), TerminalFormatter()),
diff --git a/nikola/plugins/command/serve.py b/nikola/plugins/command/serve.py
index 623e2db..de4f6e2 100644
--- a/nikola/plugins/command/serve.py
+++ b/nikola/plugins/command/serve.py
@@ -60,8 +60,8 @@ class CommandServe(Command):
'short': 'a',
'long': 'address',
'type': str,
- 'default': '127.0.0.1',
- 'help': 'Address to bind (default: 127.0.0.1)',
+ 'default': '',
+ 'help': 'Address to bind (default: 0.0.0.0 – all local interfaces)',
},
{
'name': 'browser',
@@ -84,10 +84,10 @@ class CommandServe(Command):
httpd = HTTPServer((options['address'], options['port']),
OurHTTPRequestHandler)
sa = httpd.socket.getsockname()
- self.logger.info("Serving HTTP on {0} port {1} ...".format(*sa))
+ self.logger.info("Serving HTTP on {0} port {1}...".format(*sa))
if options['browser']:
- server_url = "http://{0}:{1}/".format(options['address'], options['port'])
- self.logger.info("Opening {0} in the default web browser ...".format(server_url))
+ server_url = "http://{0}:{1}/".format(*sa)
+ self.logger.info("Opening {0} in the default web browser...".format(server_url))
webbrowser.open(server_url)
try:
httpd.serve_forever()
@@ -156,6 +156,9 @@ class OurHTTPRequestHandler(SimpleHTTPRequestHandler):
return None
self.send_response(200)
self.send_header("Content-type", ctype)
+ if os.path.splitext(path)[1] == '.svgz':
+ # Special handling for svgz to make it work nice with browsers.
+ self.send_header("Content-Encoding", 'gzip')
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
diff --git a/nikola/plugins/compile/html.py b/nikola/plugins/compile/html.py
index fff7f89..24bf385 100644
--- a/nikola/plugins/compile/html.py
+++ b/nikola/plugins/compile/html.py
@@ -26,14 +26,15 @@
"""Implementation of compile_html for HTML source files."""
+from __future__ import unicode_literals
+
import os
import re
-import codecs
+import io
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, write_metadata
-
_META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
@@ -43,8 +44,8 @@ class CompileHtml(PageCompiler):
def compile_html(self, source, dest, is_two_file=True):
makedirs(os.path.dirname(dest))
- with codecs.open(dest, "w+", "utf8") as out_file:
- with codecs.open(source, "r", "utf8") as in_file:
+ with io.open(dest, "w+", encoding="utf8") as out_file:
+ with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split(_META_SEPARATOR, data, maxsplit=1)[-1]
@@ -62,7 +63,7 @@ class CompileHtml(PageCompiler):
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write('<!--\n')
fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/ipynb/__init__.py b/nikola/plugins/compile/ipynb/__init__.py
index f4d554c..7dde279 100644
--- a/nikola/plugins/compile/ipynb/__init__.py
+++ b/nikola/plugins/compile/ipynb/__init__.py
@@ -27,7 +27,7 @@
"""Implementation of compile_html based on nbconvert."""
from __future__ import unicode_literals, print_function
-import codecs
+import io
import os
try:
@@ -47,6 +47,7 @@ class CompileIPynb(PageCompiler):
name = "ipynb"
supports_onefile = False
+ demote_headers = True
def compile_html(self, source, dest, is_two_file=True):
if flag is None:
@@ -55,8 +56,8 @@ class CompileIPynb(PageCompiler):
HTMLExporter.default_template = 'basic'
c = Config(self.site.config['IPYNB_CONFIG'])
exportHtml = HTMLExporter(config=c)
- with codecs.open(dest, "w+", "utf8") as out_file:
- with codecs.open(source, "r", "utf8") as in_file:
+ with io.open(dest, "w+", encoding="utf8") as out_file:
+ with io.open(source, "r", encoding="utf8") as in_file:
nb = in_file.read()
nb_json = nbformat.reads_json(nb)
(body, resources) = exportHtml.from_notebook_node(nb_json)
@@ -71,7 +72,7 @@ class CompileIPynb(PageCompiler):
makedirs(os.path.dirname(path))
if onefile:
raise Exception('The one-file format is not supported by this compiler.')
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
fd.write("""{
"metadata": {
"name": ""
diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py
index 4182626..47c7c9b 100644
--- a/nikola/plugins/compile/markdown/__init__.py
+++ b/nikola/plugins/compile/markdown/__init__.py
@@ -28,7 +28,7 @@
from __future__ import unicode_literals
-import codecs
+import io
import os
import re
@@ -70,8 +70,8 @@ class CompileMarkdown(PageCompiler):
req_missing(['markdown'], 'build this site (compile Markdown)')
makedirs(os.path.dirname(dest))
self.extensions += self.site.config.get("MARKDOWN_EXTENSIONS")
- with codecs.open(dest, "w+", "utf8") as out_file:
- with codecs.open(source, "r", "utf8") as in_file:
+ with io.open(dest, "w+", encoding="utf8") as out_file:
+ with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
if not is_two_file:
data = re.split('(\n\n|\r\n\r\n)', data, maxsplit=1)[-1]
@@ -90,7 +90,7 @@ class CompileMarkdown(PageCompiler):
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write('<!-- \n')
fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/markdown/mdx_gist.py b/nikola/plugins/compile/markdown/mdx_gist.py
index 247478b..4209bdd 100644
--- a/nikola/plugins/compile/markdown/mdx_gist.py
+++ b/nikola/plugins/compile/markdown/mdx_gist.py
@@ -65,6 +65,42 @@ Example with filename:
</div>
</p>
+Basic Example with hexadecimal id:
+
+ >>> import markdown
+ >>> text = """
+ ... Text of the gist:
+ ... [:gist: c4a43d6fdce612284ac0]
+ ... """
+ >>> html = markdown.markdown(text, [GistExtension()])
+ >>> print(html)
+ <p>Text of the gist:
+ <div class="gist">
+ <script src="https://gist.github.com/c4a43d6fdce612284ac0.js"></script>
+ <noscript>
+ <pre>Moo</pre>
+ </noscript>
+ </div>
+ </p>
+
+Example with hexadecimal id and filename:
+
+ >>> import markdown
+ >>> text = """
+ ... Text of the gist:
+ ... [:gist: c4a43d6fdce612284ac0 cow.txt]
+ ... """
+ >>> html = markdown.markdown(text, [GistExtension()])
+ >>> print(html)
+ <p>Text of the gist:
+ <div class="gist">
+ <script src="https://gist.github.com/c4a43d6fdce612284ac0.js?file=cow.txt"></script>
+ <noscript>
+ <pre>Moo</pre>
+ </noscript>
+ </div>
+ </p>
+
Example using reStructuredText syntax:
>>> import markdown
@@ -83,6 +119,42 @@ Example using reStructuredText syntax:
</div>
</p>
+Example using hexadecimal ID with reStructuredText syntax:
+
+ >>> import markdown
+ >>> text = """
+ ... Text of the gist:
+ ... .. gist:: c4a43d6fdce612284ac0
+ ... """
+ >>> html = markdown.markdown(text, [GistExtension()])
+ >>> print(html)
+ <p>Text of the gist:
+ <div class="gist">
+ <script src="https://gist.github.com/c4a43d6fdce612284ac0.js"></script>
+ <noscript>
+ <pre>Moo</pre>
+ </noscript>
+ </div>
+ </p>
+
+Example using hexadecimal ID and filename with reStructuredText syntax:
+
+ >>> import markdown
+ >>> text = """
+ ... Text of the gist:
+ ... .. gist:: c4a43d6fdce612284ac0 cow.txt
+ ... """
+ >>> html = markdown.markdown(text, [GistExtension()])
+ >>> print(html)
+ <p>Text of the gist:
+ <div class="gist">
+ <script src="https://gist.github.com/c4a43d6fdce612284ac0.js?file=cow.txt"></script>
+ <noscript>
+ <pre>Moo</pre>
+ </noscript>
+ </div>
+ </p>
+
Error Case: non-existent Gist ID:
>>> import markdown
@@ -95,7 +167,8 @@ Error Case: non-existent Gist ID:
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/0.js"></script>
- <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/0 --></noscript>
+ <noscript><!-- WARNING: Received a 404 response from Gist URL: \
+https://gist.githubusercontent.com/raw/0 --></noscript>
</div>
</p>
@@ -111,7 +184,8 @@ Error Case: non-existent file:
<p>Text of the gist:
<div class="gist">
<script src="https://gist.github.com/4747847.js?file=doesntexist.py"></script>
- <noscript><!-- WARNING: Received a 404 response from Gist URL: https://gist.github.com/raw/4747847/doesntexist.py --></noscript>
+ <noscript><!-- WARNING: Received a 404 response from Gist URL: \
+https://gist.githubusercontent.com/raw/4747847/doesntexist.py --></noscript>
</div>
</p>
@@ -140,11 +214,11 @@ except ImportError:
GIST_JS_URL = "https://gist.github.com/{0}.js"
GIST_FILE_JS_URL = "https://gist.github.com/{0}.js?file={1}"
-GIST_RAW_URL = "https://gist.github.com/raw/{0}"
-GIST_FILE_RAW_URL = "https://gist.github.com/raw/{0}/{1}"
+GIST_RAW_URL = "https://gist.githubusercontent.com/raw/{0}"
+GIST_FILE_RAW_URL = "https://gist.githubusercontent.com/raw/{0}/{1}"
-GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+?))?\s*\]'
-GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>\d+)(?:\s*(?P<filename>.+))\s*$'
+GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'
+GIST_RST_RE = r'(?m)^\.\.\s*gist::\s*(?P<gist_id>[^\]\s]+)(?:\s*(?P<filename>.+?))?\s*$'
class GistFetchException(Exception):
@@ -244,6 +318,5 @@ def makeExtension(configs=None):
if __name__ == '__main__':
import doctest
- # Silence user warnings thrown by tests:
doctest.testmod(optionflags=(doctest.NORMALIZE_WHITESPACE +
doctest.REPORT_NDIFF))
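
The relaxed patterns accept both the old numeric gist IDs and the newer hexadecimal ones. A quick check of the Markdown pattern, using re.search in place of the markdown Pattern machinery and invented gist arguments:

    import re

    GIST_MD_RE = r'\[:gist:\s*(?P<gist_id>\S+)(?:\s*(?P<filename>.+?))?\s*\]'

    for text in ('[:gist: 4747847 whole_row.py]',
                 '[:gist: c4a43d6fdce612284ac0]'):
        m = re.search(GIST_MD_RE, text)
        print(m.group('gist_id'), m.group('filename'))
    # 4747847 whole_row.py
    # c4a43d6fdce612284ac0 None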
diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py
index 6aa737e..ada8035 100644
--- a/nikola/plugins/compile/pandoc.py
+++ b/nikola/plugins/compile/pandoc.py
@@ -30,7 +30,7 @@ You will need, of course, to install pandoc
"""
-import codecs
+import io
import os
import subprocess
@@ -62,7 +62,7 @@ class CompilePandoc(PageCompiler):
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write('<!--\n')
fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/php.py b/nikola/plugins/compile/php.py
index 601f098..77344fb 100644
--- a/nikola/plugins/compile/php.py
+++ b/nikola/plugins/compile/php.py
@@ -29,11 +29,11 @@
from __future__ import unicode_literals
import os
-import shutil
-import codecs
+import io
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, write_metadata
+from hashlib import md5
class CompilePhp(PageCompiler):
@@ -43,7 +43,11 @@ class CompilePhp(PageCompiler):
def compile_html(self, source, dest, is_two_file=True):
makedirs(os.path.dirname(dest))
- shutil.copyfile(source, dest)
+ with io.open(dest, "w+", encoding="utf8") as out_file:
+ with open(source, "rb") as in_file:
+ hash = md5(in_file.read()).hexdigest()
+ out_file.write('<!-- __NIKOLA_PHP_TEMPLATE_INJECTION source:{0} checksum:{1}__ -->'.format(source, hash))
+ return True
def create_post(self, path, **kw):
content = kw.pop('content', None)
@@ -53,10 +57,21 @@ class CompilePhp(PageCompiler):
metadata = {}
metadata.update(self.default_metadata)
metadata.update(kw)
- os.makedirs(os.path.dirname(path))
+ if not metadata['description']:
+ # For PHP, a description must be set. Otherwise, Nikola will
+ # take the first 200 characters of the post as the Open Graph
+ # description (og:description meta element)!
+ # If the PHP source leaks there:
+ # (a) The script will be executed multiple times
+ # (b) PHP may encounter a syntax error if it cuts too early,
+ # therefore completely breaking the page
+ # Here, we just use the title. The user should come up with
+ # something better, but just using the title does the job.
+ metadata['description'] = metadata['title']
+ makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write('<!--\n')
fd.write(write_metadata(metadata))
diff --git a/nikola/plugins/compile/rest/__init__.py b/nikola/plugins/compile/rest/__init__.py
index a93199c..98c7151 100644
--- a/nikola/plugins/compile/rest/__init__.py
+++ b/nikola/plugins/compile/rest/__init__.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
-import codecs
+import io
import os
import re
@@ -58,8 +58,8 @@ class CompileRest(PageCompiler):
req_missing(['docutils'], 'build this site (compile reStructuredText)')
makedirs(os.path.dirname(dest))
error_level = 100
- with codecs.open(dest, "w+", "utf8") as out_file:
- with codecs.open(source, "r", "utf8") as in_file:
+ with io.open(dest, "w+", encoding="utf8") as out_file:
+ with io.open(source, "r", encoding="utf8") as in_file:
data = in_file.read()
add_ln = 0
if not is_two_file:
@@ -83,11 +83,11 @@ class CompileRest(PageCompiler):
'syntax_highlight': 'short',
'math_output': 'mathjax',
'template': default_template_path,
- }, logger=self.logger, l_source=source, l_add_ln=add_ln)
+ }, logger=self.logger, source_path=source, l_add_ln=add_ln)
out_file.write(output)
deps_path = dest + '.dep'
if deps.list:
- with codecs.open(deps_path, "wb+", "utf8") as deps_file:
+ with io.open(deps_path, "w+", encoding="utf8") as deps_file:
deps_file.write('\n'.join(deps.list))
else:
if os.path.isfile(deps_path):
@@ -108,7 +108,7 @@ class CompileRest(PageCompiler):
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
- with codecs.open(path, "wb+", "utf8") as fd:
+ with io.open(path, "w+", encoding="utf8") as fd:
if onefile:
fd.write(write_metadata(metadata))
fd.write('\n' + content)
@@ -213,7 +213,7 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
parser=None, parser_name='restructuredtext', writer=None,
writer_name='html', settings=None, settings_spec=None,
settings_overrides=None, config_section=None,
- enable_exit_status=None, logger=None, l_source='', l_add_ln=0):
+ enable_exit_status=None, logger=None, l_add_ln=0):
"""
Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
@@ -237,7 +237,7 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
# logger a logger from Nikola
# source source filename (docutils gets a string)
# add_ln amount of metadata lines (see comment in compile_html above)
- reader.l_settings = {'logger': logger, 'source': l_source,
+ reader.l_settings = {'logger': logger, 'source': source_path,
'add_ln': l_add_ln}
pub = docutils.core.Publisher(reader, parser, writer, settings=settings,
@@ -246,7 +246,8 @@ def rst2html(source, source_path=None, source_class=docutils.io.StringInput,
pub.set_components(None, parser_name, writer_name)
pub.process_programmatic_settings(
settings_spec, settings_overrides, config_section)
- pub.set_source(source, source_path)
+ pub.set_source(source, None)
+ pub.settings._nikola_source_path = source_path
pub.set_destination(None, destination_path)
pub.publish(enable_exit_status=enable_exit_status)
diff --git a/nikola/plugins/compile/rest/gist.py b/nikola/plugins/compile/rest/gist.py
index e09ed76..65189b5 100644
--- a/nikola/plugins/compile/rest/gist.py
+++ b/nikola/plugins/compile/rest/gist.py
@@ -49,7 +49,10 @@ class GitHubGist(Directive):
def get_raw_gist(self, gistID):
url = "https://gist.github.com/raw/{0}".format(gistID)
- return requests.get(url).text
+ try:
+ return requests.get(url).text
+ except requests.exceptions.RequestException:
+ raise self.error('Cannot get gist for url={0}'.format(url))
def run(self):
if 'https://' in self.arguments[0]:
diff --git a/nikola/plugins/compile/rest/listing.py b/nikola/plugins/compile/rest/listing.py
index 18a1807..23ec254 100644
--- a/nikola/plugins/compile/rest/listing.py
+++ b/nikola/plugins/compile/rest/listing.py
@@ -29,7 +29,7 @@
from __future__ import unicode_literals
-from codecs import open as codecs_open # for patching purposes
+import io
import os
try:
from urlparse import urlunsplit
@@ -111,7 +111,7 @@ class Listing(Include):
self.options['code'] = lang
if 'linenos' in self.options:
self.options['number-lines'] = self.options['linenos']
- with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
+ with io.open(fpath, 'r+', encoding='utf8') as fileobject:
self.content = fileobject.read().splitlines()
self.state.document.settings.record_dependencies.add(fpath)
target = urlunsplit(("link", 'listing', fname, '', ''))
diff --git a/nikola/plugins/compile/rest/post_list.py b/nikola/plugins/compile/rest/post_list.py
index 456e571..f719e31 100644
--- a/nikola/plugins/compile/rest/post_list.py
+++ b/nikola/plugins/compile/rest/post_list.py
@@ -129,6 +129,7 @@ class PostList(Directive):
else:
post_list_id = self.options.get('id', 'post_list_' + uuid.uuid4().hex)
+ filtered_timeline = []
posts = []
step = -1 if reverse is None else None
if show_all is None:
@@ -136,16 +137,20 @@ class PostList(Directive):
else:
timeline = [p for p in self.site.timeline if p.use_in_feeds]
- for post in timeline[start:stop:step]:
+ for post in timeline:
if tags:
cont = True
+ tags_lower = [t.lower() for t in post.tags]
for tag in tags:
- if tag in [t.lower() for t in post.tags]:
+ if tag in tags_lower:
cont = False
if cont:
continue
+ filtered_timeline.append(post)
+
+ for post in filtered_timeline[start:stop:step]:
if slugs:
cont = True
for slug in slugs:
diff --git a/nikola/plugins/task/bundles.py b/nikola/plugins/task/bundles.py
index 7437a9d..fca6924 100644
--- a/nikola/plugins/task/bundles.py
+++ b/nikola/plugins/task/bundles.py
@@ -122,8 +122,12 @@ def get_theme_bundles(themes):
if os.path.isfile(bundles_path):
with open(bundles_path) as fd:
for line in fd:
- name, files = line.split('=')
- files = [f.strip() for f in files.split(',')]
- bundles[name.strip().replace('/', os.sep)] = files
+ try:
+ name, files = line.split('=')
+ files = [f.strip() for f in files.split(',')]
+ bundles[name.strip().replace('/', os.sep)] = files
+ except ValueError:
+ # for empty lines
+ pass
break
return bundles
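
With the try/except, blank lines in a theme's bundles file no longer abort parsing. A standalone sketch of the same loop, fed an invented bundle definition:

    import os

    def parse_bundles(lines):
        # Parse 'output.css=a.css, b.css' lines, skipping blank or malformed ones.
        bundles = {}
        for line in lines:
            try:
                name, files = line.split('=')
                files = [f.strip() for f in files.split(',')]
                bundles[name.strip().replace('/', os.sep)] = files
            except ValueError:
                # a blank line splits into a single element and fails to unpack
                pass
        return bundles

    print(parse_bundles(['assets/css/all.css=bootstrap.css, rst.css', '', '\n']))
    # {'assets/css/all.css': ['bootstrap.css', 'rst.css']} with POSIX os.sep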
diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
index 4801347..29aa083 100644
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -24,7 +24,9 @@
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-import codecs
+from __future__ import unicode_literals
+
+import io
import os
from nikola.plugin_categories import Task
@@ -82,13 +84,13 @@ class CopyAssets(Task):
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
- with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
+ with io.open(code_css_path, 'w+', encoding='utf8') as outf:
outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
outf.write(kw["code.css_close"])
if os.path.exists(code_css_path):
- with codecs.open(code_css_path, 'r', 'utf-8') as fh:
+ with io.open(code_css_path, 'r', encoding='utf-8') as fh:
testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
else:
testcontents = False
diff --git a/nikola/plugins/task/copy_files.py b/nikola/plugins/task/copy_files.py
index 9846ca0..1d31756 100644
--- a/nikola/plugins/task/copy_files.py
+++ b/nikola/plugins/task/copy_files.py
@@ -52,4 +52,4 @@ class CopyFiles(Task):
for task in utils.copy_tree(src, real_dst, link_cutoff=dst):
task['basename'] = self.name
task['uptodate'] = [utils.config_changed(kw)]
- yield utils.apply_filters(task, filters)
+ yield utils.apply_filters(task, filters, skip_ext=['.html'])
diff --git a/nikola/plugins/task/galleries.py b/nikola/plugins/task/galleries.py
index 366374b..f835444 100644
--- a/nikola/plugins/task/galleries.py
+++ b/nikola/plugins/task/galleries.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
-import codecs
+import io
import datetime
import glob
import json
@@ -55,6 +55,8 @@ from nikola import utils
from nikola.post import Post
from nikola.utils import req_missing
+_image_size_cache = {}
+
class Galleries(Task):
"""Render image galleries."""
@@ -153,6 +155,9 @@ class Galleries(Task):
# Create index.html for each language
for lang in self.kw['translations']:
+ # save navigation links as dependencies
+ self.kw['navigation_links|{0}'.format(lang)] = self.kw['global_context']['navigation_links'](lang)
+
dst = os.path.join(
self.kw['output_folder'],
self.site.path(
@@ -460,8 +465,11 @@ class Galleries(Task):
photo_array = []
for img, thumb, title in zip(img_list, thumbs, img_titles):
- im = Image.open(thumb)
- w, h = im.size
+ w, h = _image_size_cache.get(thumb, (None, None))
+ if w is None:
+ im = Image.open(thumb)
+ w, h = im.size
+ _image_size_cache[thumb] = w, h
# Thumbs are files in output, we need URLs
photo_array.append({
'url': url_from_path(img),
@@ -515,7 +523,7 @@ class Galleries(Task):
rss_obj.rss_attrs["xmlns:dc"] = "http://purl.org/dc/elements/1.1/"
dst_dir = os.path.dirname(output_path)
utils.makedirs(dst_dir)
- with codecs.open(output_path, "wb+", "utf-8") as rss_file:
+ with io.open(output_path, "w+", encoding="utf-8") as rss_file:
data = rss_obj.to_xml(encoding='utf-8')
if isinstance(data, utils.bytes_str):
data = data.decode('utf-8')
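
Caching thumbnail dimensions avoids re-opening every image on each gallery rebuild. A reduced sketch of the memoization; Pillow's Image is the dependency the plugin already uses:

    from PIL import Image  # Pillow

    _image_size_cache = {}

    def image_size(thumb_path):
        # Open each thumbnail at most once per build and remember its size.
        size = _image_size_cache.get(thumb_path)
        if size is None:
            im = Image.open(thumb_path)
            size = im.size  # (width, height)
            _image_size_cache[thumb_path] = size
        return size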
diff --git a/nikola/plugins/task/indexes.py b/nikola/plugins/task/indexes.py
index 386cc18..0a2cd02 100644
--- a/nikola/plugins/task/indexes.py
+++ b/nikola/plugins/task/indexes.py
@@ -147,20 +147,29 @@ class Indexes(Task):
groups[dirname].append(p)
for dirname, post_list in groups.items():
context = {}
- context["items"] = [
- (post.title(lang), post.permalink(lang))
- for post in post_list
- ]
+ context["items"] = []
+ should_render = True
output_name = os.path.join(kw['output_folder'], dirname, kw['index_file'])
- task = self.site.generic_post_list_renderer(lang, post_list,
- output_name,
- template_name,
- kw['filters'],
- context)
- task_cfg = {1: task['uptodate'][0].config, 2: kw}
- task['uptodate'] = [config_changed(task_cfg)]
- task['basename'] = self.name
- yield task
+ short_destination = os.path.join(dirname, kw['index_file'])
+ for post in post_list:
+ # If there is an index.html pending to be created from
+ # a story, do not generate the STORY_INDEX
+ if post.destination_path(lang) == short_destination:
+ should_render = False
+ else:
+ context["items"].append((post.title(lang),
+ post.permalink(lang)))
+
+ if should_render:
+ task = self.site.generic_post_list_renderer(lang, post_list,
+ output_name,
+ template_name,
+ kw['filters'],
+ context)
+ task_cfg = {1: task['uptodate'][0].config, 2: kw}
+ task['uptodate'] = [config_changed(task_cfg)]
+ task['basename'] = self.name
+ yield task
def index_path(self, name, lang):
if name not in [None, 0]:
diff --git a/nikola/plugins/task/listings.py b/nikola/plugins/task/listings.py
index a0fe974..79f6763 100644
--- a/nikola/plugins/task/listings.py
+++ b/nikola/plugins/task/listings.py
@@ -73,7 +73,7 @@ class Listings(Task):
code = highlight(fd.read(), lexer,
HtmlFormatter(cssclass='code',
linenos="table", nowrap=False,
- lineanchors=utils.slugify(in_name),
+ lineanchors=utils.slugify(in_name, force=True),
anchorlinenos=True))
# the pygments highlighter uses <div class="codehilite"><pre>
# for code. We switch it to reST's <pre class="code">.
@@ -124,6 +124,9 @@ class Listings(Task):
for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:
uptodate[k] = self.site.GLOBAL_CONTEXT[k](kw['default_lang'])
+ # save navigation links as dependencies
+ uptodate['navigation_links'] = uptodate['c']['navigation_links'](kw['default_lang'])
+
uptodate2 = uptodate.copy()
uptodate2['f'] = files
uptodate2['d'] = dirs
diff --git a/nikola/plugins/task/redirect.py b/nikola/plugins/task/redirect.py
index eccc0ab..e1134bf 100644
--- a/nikola/plugins/task/redirect.py
+++ b/nikola/plugins/task/redirect.py
@@ -24,7 +24,7 @@
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-import codecs
+import io
import os
from nikola.plugin_categories import Task
@@ -60,7 +60,8 @@ class Redirect(Task):
def create_redirect(src, dst):
utils.makedirs(os.path.dirname(src))
- with codecs.open(src, "wb+", "utf8") as fd:
+ with io.open(src, "w+", encoding="utf8") as fd:
fd.write('<!DOCTYPE html><head><title>Redirecting...</title>'
+ '<meta name="robots" content="noindex">'
'<meta http-equiv="refresh" content="0; '
'url={0}"></head><body><p>Page moved <a href="{0}">here</a></p></body>'.format(dst))
diff --git a/nikola/plugins/task/robots.py b/nikola/plugins/task/robots.py
index 9944c0d..b229d37 100644
--- a/nikola/plugins/task/robots.py
+++ b/nikola/plugins/task/robots.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, absolute_import, unicode_literals
-import codecs
+import io
import os
try:
from urlparse import urljoin, urlparse
@@ -51,14 +51,14 @@ class RobotsFile(LateTask):
"robots_exclusions": self.site.config["ROBOTS_EXCLUSIONS"]
}
- if kw["site_url"] != urljoin(kw["site_url"], "/"):
- utils.LOGGER.warn('robots.txt not ending up in server root, will be useless')
-
sitemapindex_url = urljoin(kw["base_url"], "sitemapindex.xml")
robots_path = os.path.join(kw['output_folder'], "robots.txt")
def write_robots():
- with codecs.open(robots_path, 'wb+', 'utf8') as outf:
+ if kw["site_url"] != urljoin(kw["site_url"], "/"):
+ utils.LOGGER.warn('robots.txt not ending up in server root, will be useless')
+
+ with io.open(robots_path, 'w+', encoding='utf8') as outf:
outf.write("Sitemap: {0}\n\n".format(sitemapindex_url))
if kw["robots_exclusions"]:
outf.write("User-Agent: *\n")
diff --git a/nikola/plugins/task/sitemap/__init__.py b/nikola/plugins/task/sitemap/__init__.py
index beac6cb..943e9b2 100644
--- a/nikola/plugins/task/sitemap/__init__.py
+++ b/nikola/plugins/task/sitemap/__init__.py
@@ -25,7 +25,7 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function, absolute_import, unicode_literals
-import codecs
+import io
import datetime
import os
try:
@@ -150,7 +150,7 @@ class Sitemap(LateTask):
continue
if path.endswith('.html') or path.endswith('.htm'):
try:
- if u'<!doctype html' not in codecs.open(real_path, 'r', 'utf8').read(1024).lower():
+ if u'<!doctype html' not in io.open(real_path, 'r', encoding='utf8').read(1024).lower():
# ignores "html" files without doctype
# alexa-verify, google-site-verification, etc.
continue
@@ -160,7 +160,8 @@ class Sitemap(LateTask):
continue
""" put RSS in sitemapindex[] instead of in urlset[], sitemap_path is included after it is generated """
if path.endswith('.xml') or path.endswith('.rss'):
- if u'<rss' in codecs.open(real_path, 'r', 'utf8').read(512) or u'<urlset'and path != sitemap_path:
+ filehead = io.open(real_path, 'r', encoding='utf8').read(512)
+ if u'<rss' in filehead or (u'<urlset' in filehead and path != sitemap_path):
path = path.replace(os.sep, '/')
lastmod = self.get_lastmod(real_path)
loc = urljoin(base_url, base_path + path)
@@ -187,7 +188,7 @@ class Sitemap(LateTask):
def write_sitemap():
# Have to rescan, because files may have been added between
# task dep scanning and task execution
- with codecs.open(sitemap_path, 'wb+', 'utf8') as outf:
+ with io.open(sitemap_path, 'w+', encoding='utf8') as outf:
outf.write(urlset_header)
for k in sorted(urlset.keys()):
outf.write(urlset[k])
@@ -196,7 +197,7 @@ class Sitemap(LateTask):
sitemapindex[sitemap_url] = sitemap_format.format(sitemap_url, self.get_lastmod(sitemap_path))
def write_sitemapindex():
- with codecs.open(sitemapindex_path, 'wb+', 'utf8') as outf:
+ with io.open(sitemapindex_path, 'w+', encoding='utf8') as outf:
outf.write(sitemapindex_header)
for k in sorted(sitemapindex.keys()):
outf.write(sitemapindex[k])
diff --git a/nikola/plugins/task/sources.py b/nikola/plugins/task/sources.py
index 2324af2..4c669c2 100644
--- a/nikola/plugins/task/sources.py
+++ b/nikola/plugins/task/sources.py
@@ -60,11 +60,11 @@ class Sources(Task):
continue
output_name = os.path.join(
kw['output_folder'], post.destination_path(
- lang, post.source_ext()))
- source = post.source_path
- dest_ext = self.site.get_compiler(post.source_path).extension()
- if dest_ext == post.source_ext():
+ lang, post.source_ext(True)))
+ # do not publish PHP sources
+ if post.source_ext(True) == post.compiler.extension():
continue
+ source = post.source_path
if lang != kw["default_lang"]:
source_lang = utils.get_translation_candidate(self.site.config, source, lang)
if os.path.exists(source_lang):
diff --git a/nikola/plugins/task/tags.py b/nikola/plugins/task/tags.py
index f7f3579..8d43f13 100644
--- a/nikola/plugins/task/tags.py
+++ b/nikola/plugins/task/tags.py
@@ -25,7 +25,6 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
-import codecs
import json
import os
try:
@@ -124,8 +123,8 @@ class RenderTags(Task):
def write_tag_data(data):
utils.makedirs(os.path.dirname(output_name))
- with codecs.open(output_name, 'wb+', 'utf8') as fd:
- fd.write(json.dumps(data))
+ with open(output_name, 'w+') as fd:
+ json.dump(data, fd)
task = {
'basename': str(self.name),
@@ -169,7 +168,7 @@ class RenderTags(Task):
else:
context["cat_items"] = None
context["permalink"] = self.site.link("tag_index", None, lang)
- context["description"] = None
+ context["description"] = context["title"]
task = self.site.generic_post_list_renderer(
lang,
[],
@@ -231,7 +230,7 @@ class RenderTags(Task):
page_name(tag, i + 1, lang))
context["permalink"] = self.site.link(kind, tag, lang)
context["tag"] = tag
- context["description"] = None
+ context["description"] = context["title"]
task = self.site.generic_post_list_renderer(
lang,
post_list,
@@ -259,7 +258,7 @@ class RenderTags(Task):
context["permalink"] = self.site.link(kind, tag, lang)
context["tag"] = tag
context["kind"] = kind
- context["description"] = None
+ context["description"] = context["title"]
task = self.site.generic_post_list_renderer(
lang,
post_list,
diff --git a/nikola/plugins/template/jinja.py b/nikola/plugins/template/jinja.py
index 097ec96..5156f38 100644
--- a/nikola/plugins/template/jinja.py
+++ b/nikola/plugins/template/jinja.py
@@ -55,6 +55,8 @@ class JinjaTemplates(TemplateSystem):
self.lookup.lstrip_blocks = True
self.lookup.filters['tojson'] = json.dumps
self.lookup.globals['enumerate'] = enumerate
+ self.lookup.globals['isinstance'] = isinstance
+ self.lookup.globals['tuple'] = tuple
def set_directories(self, directories, cache_folder):
"""Create a template lookup."""