Diffstat (limited to 'gallery_dl/extractor/rawkuma.py')
-rw-r--r--  gallery_dl/extractor/rawkuma.py  63
1 file changed, 39 insertions(+), 24 deletions(-)
diff --git a/gallery_dl/extractor/rawkuma.py b/gallery_dl/extractor/rawkuma.py
index 242486d..a4a0c9b 100644
--- a/gallery_dl/extractor/rawkuma.py
+++ b/gallery_dl/extractor/rawkuma.py
@@ -7,7 +7,7 @@
"""Extractors for https://rawkuma.net/"""
from .common import MangaExtractor, ChapterExtractor
-from .. import text, util
+from .. import text
BASE_PATTERN = r"(?:https?://)?rawkuma\.(?:net|com)"
@@ -21,43 +21,40 @@ class RawkumaBase():
class RawkumaChapterExtractor(RawkumaBase, ChapterExtractor):
"""Extractor for manga chapters from rawkuma.net"""
archive_fmt = "{chapter_id}_{page}"
- pattern = BASE_PATTERN + r"/([^/?#]+-chapter-\d+(?:-\d+)?)"
- example = "https://rawkuma.net/TITLE-chapter-123/"
+ pattern = rf"{BASE_PATTERN}(/manga/[^/?#]+/chapter-\d+(?:.\d+)?\.(\d+))"
+ example = "https://rawkuma.net/manga/7TITLE/chapter-123.321"
def __init__(self, match):
url = f"{self.root}/{match[1]}/"
ChapterExtractor.__init__(self, match, url)
def metadata(self, page):
- item = util.json_loads(text.extr(page, ',"item":', "}};"))
- title = text.rextr(
- page, '<h1 class="entry-title', "</h1>").partition(" &#8211; ")[2]
- date = text.extr(page, 'datetime="', '"')
- chapter, sep, minor = item["c"].partition(".")
+ manga, _, chapter = text.extr(
+ page, '<title>', "<").rpartition(" Chapter ")
+ chapter, sep, minor = chapter.partition(" &#8211; ")[0].partition(".")
return {
- "manga" : item["s"],
- "manga_id" : text.parse_int(item["mid"]),
+ "manga" : text.unescape(manga),
+ "manga_id" : text.parse_int(text.extr(page, "manga_id=", "&")),
"chapter" : text.parse_int(chapter),
"chapter_minor": sep + minor,
- "chapter_id" : text.parse_int(item["cid"]),
- "title" : text.unescape(title),
- "date" : text.parse_datetime(
- date, "%Y-%m-%dWIB%H:%M:%S%z"),
- "thumbnail" : item.get("t"),
+ "chapter_id" : text.parse_int(self.groups[-1]),
+ # "title" : text.unescape(title),
+ "date" : self.parse_datetime_iso(text.extr(
+ page, 'datetime="', '"')),
"lang" : "ja",
"language" : "Japanese",
}
def images(self, page):
- images = util.json_loads(text.extr(page, '","images":', '}'))
- return [(url, None) for url in images]
+ return [(url, None) for url in text.extract_iter(
+ page, "<img src='", "'")]
class RawkumaMangaExtractor(RawkumaBase, MangaExtractor):
"""Extractor for manga from rawkuma.net"""
chapterclass = RawkumaChapterExtractor
- pattern = BASE_PATTERN + r"/manga/([^/?#]+)"
+ pattern = rf"{BASE_PATTERN}/manga/([^/?#]+)"
example = "https://rawkuma.net/manga/TITLE/"
def __init__(self, match):
@@ -66,18 +63,36 @@ class RawkumaMangaExtractor(RawkumaBase, MangaExtractor):
    def chapters(self, page):
        manga = text.unescape(text.extr(page, "<title>", " &#8211; "))
+        manga_id = text.parse_int(text.extr(page, "manga_id=", "&"))
+
+        url = f"{self.root}/wp-admin/admin-ajax.php"
+        params = {
+            "manga_id": manga_id,
+            "page"    : "1",
+            "action"  : "chapter_list",
+        }
+        headers = {
+            "HX-Request"    : "true",
+            "HX-Trigger"    : "chapter-list",
+            "HX-Target"     : "chapter-list",
+            "HX-Current-URL": self.page_url,
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+        }
+        html = self.request(url, params=params, headers=headers).text

        results = []
-        for chbox in text.extract_iter(
-                page, '<li data-num="', "</a>"):
-            info = text.extr(chbox, '', '"')
-            chapter, _, title = info.partition(" - ")
+        for url in text.extract_iter(html, '<a href="', '"'):
+            info = url[url.rfind("-")+1:-1]
+            chapter, _, chapter_id = info.rpartition(".")
            chapter, sep, minor = chapter.partition(".")
-            results.append((text.extr(chbox, 'href="', '"'), {
+            results.append((url, {
                "manga"        : manga,
+                "manga_id"     : manga_id,
                "chapter"      : text.parse_int(chapter),
                "chapter-minor": sep + minor,
-                "title"        : title,
+                "chapter_id"   : text.parse_int(chapter_id),
            }))
        return results
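
For reference, the chapter-list lookup that the new chapters() method performs can be sketched outside gallery-dl with a plain HTTP client. This is only an illustration of the request flow visible in the diff above: it assumes the requests library, that the manga page embeds "manga_id=<id>&", and that rawkuma.net keeps exposing the WordPress admin-ajax.php "chapter_list" action; the helper name fetch_chapters and its return format are made up for the example.

# Standalone sketch of the request flow added in this commit (illustrative only).
# Assumptions: the manga page embeds "manga_id=<id>&", and admin-ajax.php returns
# an HTML fragment of <a href="..."> links ending in "chapter-<n>[.<minor>].<id>/".
import re
import requests

ROOT = "https://rawkuma.net"

def fetch_chapters(manga_slug):
    """Return (url, chapter, chapter_id) tuples for a manga (hypothetical helper)."""
    page_url = f"{ROOT}/manga/{manga_slug}/"
    page = requests.get(page_url, timeout=30).text
    manga_id = re.search(r"manga_id=(\d+)&", page).group(1)

    # same htmx-style headers the extractor sends
    headers = {
        "HX-Request"    : "true",
        "HX-Trigger"    : "chapter-list",
        "HX-Target"     : "chapter-list",
        "HX-Current-URL": page_url,
    }
    params = {"manga_id": manga_id, "page": "1", "action": "chapter_list"}
    html = requests.get(f"{ROOT}/wp-admin/admin-ajax.php",
                        params=params, headers=headers, timeout=30).text

    chapters = []
    for url in re.findall(r'<a href="([^"]+)"', html):
        info = url[url.rfind("-")+1:-1]          # e.g. "123.321" or "123.5.321"
        chapter, _, chapter_id = info.rpartition(".")
        chapters.append((url, chapter, chapter_id))
    return chapters

The same trailing "chapter-<number>[.<minor>].<chapter_id>" convention in each link is what the chapter extractor's new URL pattern relies on.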