Diffstat (limited to 'gallery_dl/job.py')
-rw-r--r--   gallery_dl/job.py   64
1 file changed, 36 insertions, 28 deletions
diff --git a/gallery_dl/job.py b/gallery_dl/job.py
index 667b9b3..20823a6 100644
--- a/gallery_dl/job.py
+++ b/gallery_dl/job.py
@@ -29,15 +29,9 @@ class Job():
         extr.log.job = self
         extr.log.debug("Using %s for '%s'",
                        extr.__class__.__name__, extr.url)
-        # url predicates
-        self.pred_url = self._prepare_predicates(
-            "image", [util.UniquePredicate()], True)
+        self.pred_url = self._prepare_predicates("image", True)
+        self.pred_queue = self._prepare_predicates("chapter", False)
 
-        # queue predicates
-        self.pred_queue = self._prepare_predicates(
-            "chapter", [], False)
-
-        # category transfer
         if parent and parent.extractor.config(
                 "category-transfer", parent.extractor.categorytransfer):
             self.extractor.category = parent.extractor.category
@@ -142,7 +136,12 @@ class Job():
         if self.userkwds:
             kwdict.update(self.userkwds)
 
-    def _prepare_predicates(self, target, predicates, skip=True):
+    def _prepare_predicates(self, target, skip=True):
+        predicates = []
+
+        if self.extractor.config(target + "-unique"):
+            predicates.append(util.UniquePredicate())
+
         pfilter = self.extractor.config(target + "-filter")
         if pfilter:
             try:
@@ -191,14 +190,18 @@ class DownloadJob(Job):
 
     def handle_url(self, url, keywords, fallback=None):
         """Download the resource specified in 'url'"""
+        postprocessors = self.postprocessors
+        pathfmt = self.pathfmt
+        archive = self.archive
+
         # prepare download
-        self.pathfmt.set_keywords(keywords)
+        pathfmt.set_keywords(keywords)
 
-        if self.postprocessors:
-            for pp in self.postprocessors:
-                pp.prepare(self.pathfmt)
+        if postprocessors:
+            for pp in postprocessors:
+                pp.prepare(pathfmt)
 
-        if self.pathfmt.exists(self.archive):
+        if pathfmt.exists(archive):
             self.handle_skip()
             return
 
@@ -215,24 +218,24 @@ class DownloadJob(Job):
                     break
             else:
                 # download failed
-                self.log.error(
-                    "Failed to download %s", self.pathfmt.filename or url)
+                self.log.error("Failed to download %s",
+                               pathfmt.filename or url)
                 return
 
-        if not self.pathfmt.temppath:
+        if not pathfmt.temppath:
             self.handle_skip()
             return
 
         # run post processors
-        if self.postprocessors:
-            for pp in self.postprocessors:
-                pp.run(self.pathfmt)
+        if postprocessors:
+            for pp in postprocessors:
+                pp.run(pathfmt)
 
         # download succeeded
-        self.pathfmt.finalize()
-        self.out.success(self.pathfmt.path, 0)
-        if self.archive:
-            self.archive.add(keywords)
+        pathfmt.finalize()
+        self.out.success(pathfmt.path, 0)
+        if archive:
+            archive.add(keywords)
         self._skipcnt = 0
 
     def handle_urllist(self, urls, keywords):
@@ -281,20 +284,22 @@ class DownloadJob(Job):
 
     def get_downloader(self, scheme):
         """Return a downloader suitable for 'scheme'"""
-        if scheme == "https":
-            scheme = "http"
         try:
             return self.downloaders[scheme]
         except KeyError:
             pass
 
         klass = downloader.find(scheme)
-        if klass and config.get(("downloader", scheme, "enabled"), True):
+        if klass and config.get(("downloader", klass.scheme, "enabled"), True):
            instance = klass(self.extractor, self.out)
         else:
            instance = None
            self.log.error("'%s:' URLs are not supported/enabled", scheme)
-        self.downloaders[scheme] = instance
+
+        if klass.scheme == "http":
+            self.downloaders["http"] = self.downloaders["https"] = instance
+        else:
+            self.downloaders[scheme] = instance
         return instance
 
     def initialize(self, keywords=None):
@@ -302,7 +307,10 @@
         self.pathfmt = util.PathFormat(self.extractor)
         if keywords:
             self.pathfmt.set_directory(keywords)
+
         self.sleep = self.extractor.config("sleep")
+        if not self.extractor.config("download", True):
+            self.download = self.pathfmt.fix_extension
         skip = self.extractor.config("skip", True)
         if skip:
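The first two hunks replace the always-on util.UniquePredicate() for image URLs with a configuration-driven check: _prepare_predicates() now only adds the duplicate filter when the "<target>-unique" option is set, and the same helper builds the queue ("chapter") predicates. The standalone sketch below illustrates that pattern; build_predicates, the plain-dict config, and the simplified UniquePredicate are illustrative stand-ins, not the project's actual code.

# Minimal sketch of config-driven duplicate filtering, loosely following
# the diff above. Names and signatures are stand-ins for illustration.

class UniquePredicate:
    """Reject any URL that has already been seen."""
    def __init__(self):
        self.urls = set()

    def __call__(self, url, kwdict):
        if url in self.urls:
            return False
        self.urls.add(url)
        return True


def build_predicates(config, target):
    """Collect URL predicates for 'target' ("image" or "chapter")."""
    predicates = []
    # duplicate filtering is opt-in via the "<target>-unique" option
    if config.get(target + "-unique"):
        predicates.append(UniquePredicate())
    # the real method goes on to evaluate the "<target>-filter" option
    return predicates


preds = build_predicates({"image-unique": True}, "image")
url = "https://example.org/file/1.jpg"
print(all(p(url, {}) for p in preds))   # True  -> first occurrence passes
print(all(p(url, {}) for p in preds))   # False -> duplicate is rejected

The remaining hunks follow directly from the diff: handle_url() caches self.pathfmt, self.postprocessors, and self.archive in local variables; get_downloader() stores one instance under both the "http" and "https" keys when the downloader's own scheme is "http", instead of rewriting "https" to "http" before the cache lookup; and initialize() gains a "download" option that, when disabled, replaces the download step with pathfmt.fix_extension.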
