# -*- coding: utf-8 -*-

# Copyright 2015-2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://dynasty-scans.com/"""

from .common import ChapterExtractor, MangaExtractor, Extractor, Message
from .. import text, util

BASE_PATTERN = r"(?:https?://)?(?:www\.)?dynasty-scans\.com"


class DynastyscansBase():
    """Base class for dynastyscans extractors"""
    category = "dynastyscans"
    root = "https://dynasty-scans.com"

    def _parse_image_page(self, image_id):
        """Fetch an /images/<image_id> page and extract its metadata.

        Returns a dict with the absolute image URL, the numeric image id,
        the tag list, the upload-date string, and the source link (empty
        string when the button group has no "Source" entry).

        NOTE(review): the HTML end-markers ("</span>", "</div>") had been
        stripped from the garbled source text and were reconstructed from
        the surrounding start markers -- confirm against the live page.
        """
        url = f"{self.root}/images/{image_id}"
        extr = text.extract_from(self.request(url).text)

        date = extr("class='create_at'>", "</span>")
        tags = extr("class='tags'>", "</span>")
        src = extr("class='btn-group'>", "</div>")
        url = extr(' src="', '"')

        # keep the href only when the button group really contains a
        # "Source" button
        src = text.extr(src, 'href="', '"') if "Source<" in src else ""

        return {
            "url"     : self.root + url,
            "image_id": text.parse_int(image_id),
            "tags"    : text.split_html(tags),
            "date"    : text.remove_html(date),
            "source"  : text.unescape(src),
        }


class DynastyscansChapterExtractor(DynastyscansBase, ChapterExtractor):
    """Extractor for manga-chapters from dynasty-scans.com"""
    pattern = BASE_PATTERN + r"(/chapters/[^/?#]+)"
    example = "https://dynasty-scans.com/chapters/NAME"

    def metadata(self, page):
        """Extract chapter metadata from a chapter HTML page.

        Chapter headings look like "<a ...>MANGA</a> ch12.5: TITLE";
        the regex splits them into manga name, chapter number, minor
        suffix, and title.

        NOTE(review): every HTML fragment inside the string/regex
        literals below ("<a [^>]*>", "</a>", "<h3 id='chapter-title'>",
        "</b>", "</a>", "</i> ", "</span>", "</div>") had been stripped
        from the garbled source and was reconstructed -- verify against
        the live page markup.
        """
        extr = text.extract_from(page)
        match = text.re(
            r"(?:<a [^>]*>)?([^<]+)(?:</a>)?"  # manga name
            r"(?: ch(\d+)([^:<]*))?"           # chapter info
            r"(?:: (.+))?"                     # title
        ).match(extr("<h3 id='chapter-title'><b>", "</b>"))
        author = extr(" by ", "</a>")
        group = extr('"icon-print"></i> ', '</span>')

        return {
            "manga"   : text.unescape(match[1]),
            "chapter" : text.parse_int(match[2]),
            "chapter_minor": match[3] or "",
            "title"   : text.unescape(match[4] or ""),
            "author"  : text.remove_html(author),
            # fall back to the logo's alt text when the group name is
            # only present as an image
            "group"   : (text.remove_html(group) or
                         text.extr(group, ' alt="', '"')),
            "date"    : self.parse_datetime(extr(
                '"icon-calendar"></i> ', '<'), "%b %d, %Y"),
            "tags"    : text.split_html(extr(
                "class='tags'>", "</div>")),
            # NOTE(review): the garbled source breaks off here; any
            # further dict entries (and whole definitions that followed,
            # e.g. an images() method and other extractor classes) were
            # lost and must be restored from upstream.
        }


class _DynastyscansFeedFragment(DynastyscansBase, Extractor):
    """Holder for an orphaned Atom-feed parsing loop.

    NOTE(review): the enclosing class/method definition of the loop
    below -- including the code that fetches the feed and builds
    ``root`` and the initial ``data`` dict -- was lost when this file
    was garbled.  This wrapper only gives the surviving code a
    syntactically valid home (it deliberately defines no ``pattern``,
    so it is not matched as an extractor); restore the real definition
    from upstream before use.
    """

    def _parse_feed(self, root, data):
        """Yield one (Message.Queue, url, data) triple per feed entry.

        ``root`` is presumably a parsed Atom feed element
        (xml.etree.ElementTree.Element) and ``data`` a metadata dict
        updated in place for each entry -- TODO confirm.
        """
        for element in root:
            if element.tag != "entry":
                continue
            # entry child layout assumed: [1] published, [2] updated,
            # [4] link, [5] title, [6] content -- verify against feed
            content = element[6][0]
            data["author"] = content[0].text[8:]      # drop "Author: " (8 chars) -- presumably
            data["scanlator"] = content[1].text[11:]  # drop "Scanlator: " (11 chars) -- presumably
            data["tags"] = content[2].text[6:].lower().split(", ")
            data["title"] = element[5].text
            data["date"] = self.parse_datetime_iso(element[1].text)
            data["date_updated"] = self.parse_datetime_iso(element[2].text)
            yield Message.Queue, element[4].text, data