# -*- coding: utf-8 -*-

# Copyright 2015-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.mangahere.cc/"""

from .common import ChapterExtractor, MangaExtractor
from .. import text
import re


class MangahereBase():
    """Base class for mangahere extractors"""
    category = "mangahere"
    root = "https://www.mangahere.cc"
    root_mobile = "https://m.mangahere.cc"
    url_fmt = root_mobile + "/manga/{}/{}.html"


class MangahereChapterExtractor(MangahereBase, ChapterExtractor):
    """Extractor for manga-chapters from mangahere.cc"""
    pattern = (r"(?:https?://)?(?:www\.|m\.)?mangahere\.c[co]/manga/"
               r"([^/]+(?:/v0*(\d+))?/c([^/?#]+))")
    example = "https://www.mangahere.cc/manga/TITLE/c001/1.html"

    def __init__(self, match):
        self.part, self.volume, self.chapter = match.groups()
        url = self.url_fmt.format(self.part, 1)
        ChapterExtractor.__init__(self, match, url)

    def _init(self):
        # send the mobile site as Referer with every request
        self.session.headers["Referer"] = self.root_mobile + "/"

    def metadata(self, page):
        # the page count is the last entry of the page-select dropdown
        pos = page.index("</select>")
        count     , pos = text.extract(page, ">", "<", pos - 20)
        manga_id  , pos = text.extract(page, "series_id = ", ";", pos)
        chapter_id, pos = text.extract(page, "chapter_id = ", ";", pos)
        manga     , pos = text.extract(page, '"name":"', '"', pos)
        chapter, dot, minor = self.chapter.partition(".")

        return {
            "manga": text.unescape(manga),
            "manga_id": text.parse_int(manga_id),
            "title": self._get_title(),
            "volume": text.parse_int(self.volume),
            "chapter": text.parse_int(chapter),
            "chapter_minor": dot + minor,
            "chapter_id": text.parse_int(chapter_id),
            "count": text.parse_int(count),
            "lang": "en",
            "language": "English",
        }

    def images(self, page):
        # each mobile page embeds two images;
        # yield both, then fetch the next page
        pnum = 1
        while True:
            url, pos = text.extract(page, '<img src="', '"')
            yield text.ensure_http_scheme(url), None
            url, pos = text.extract(page, ' src="', '"', pos)
            yield text.ensure_http_scheme(url), None

            pnum += 2
            page = self.request(self.url_fmt.format(self.part, pnum)).text

    def _get_title(self):
        # extract this chapter's title from the desktop overview page
        url = "{}/manga/{}/".format(self.root, self.part)
        page = self.request(url).text
        try:
            pos = page.index(self.part) + len(self.part)
            pos = page.index(self.part, pos) + len(self.part)
            return text.extract(page, ' title="', '"', pos)[0]
        except ValueError:
            return ""


class MangahereMangaExtractor(MangahereBase, MangaExtractor):
    """Extractor for manga from mangahere.cc"""
    chapterclass = MangahereChapterExtractor
    pattern = (r"(?:https?://)?(?:www\.|m\.)?mangahere\.c[co]"
               r"(/manga/[^/?#]+/?)(?:#.*)?$")
    example = "https://www.mangahere.cc/manga/TITLE"

    def _init(self):
        # mark the session as adult ('isAdult' cookie)
        self.cookies.set("isAdult", "1", domain="www.mangahere.cc")

    def chapters(self, page):
        results = []
        manga, pos = text.extract(page, '<meta name="og:title" content="', '"')
        manga = text.unescape(manga)

        # restrict parsing to the chapter list
        page = text.extract(
            page, 'id="chapterlist"', 'class="detail-main-list-more"', pos)[0]
        pos = 0
        while True:
            url, pos = text.extract(page, ' href="', '"', pos)
            if not url:
                return results
            info, pos = text.extract(page, 'class="title3">', '<', pos)
            date, pos = text.extract(page, 'class="title2">', '<', pos)

            match = re.match(
                r"(?:Vol\.0*(\d+) )?Ch\.0*(\d+)(\S*)(?: - (.*))?", info)
            if match:
                volume, chapter, minor, title = match.groups()
            else:
                # fall back to parsing chapter and minor number from the URL
                chapter, _, minor = url[:-1].rpartition("/c")[2].partition(".")
                minor = "." + minor
                volume = 0
                title = ""

            results.append((text.urljoin(self.root, url), {
                "manga": manga,
                "title": text.unescape(title) if title else "",
                "volume": text.parse_int(volume),
                "chapter": text.parse_int(chapter),
                "chapter_minor": minor,
                "date": date,
                "lang": "en",
                "language": "English",
            }))
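

# Illustrative sketch (an addition for readers, not part of the upstream
# gallery-dl module): it shows which groups the chapter URL pattern captures.
# The sample URL and the "python -m gallery_dl.extractor.mangahere" module
# path are assumptions based on the 'example' attributes above and on the
# package layout implied by the relative imports.
if __name__ == "__main__":
    sample = "https://www.mangahere.cc/manga/TITLE/v01/c001.5/1.html"
    m = re.match(MangahereChapterExtractor.pattern, sample)
    if m:
        part, volume, chapter = m.groups()
        print("part   :", part)     # "TITLE/v01/c001.5"
        print("volume :", volume)   # "1"
        print("chapter:", chapter)  # "001.5"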