Diffstat (limited to 'gallery_dl/extractor/idolcomplex.py')
| -rw-r--r-- | gallery_dl/extractor/idolcomplex.py | 129 |
1 file changed, 74 insertions, 55 deletions
diff --git a/gallery_dl/extractor/idolcomplex.py b/gallery_dl/extractor/idolcomplex.py
index b7b6ef1..5c7a1b3 100644
--- a/gallery_dl/extractor/idolcomplex.py
+++ b/gallery_dl/extractor/idolcomplex.py
@@ -15,15 +15,17 @@ from .. import text, util, exception
 import collections
 import re
 
+BASE_PATTERN = r"(?:https?://)?idol\.sankakucomplex\.com(?:/[a-z]{2})?"
+
 
 class IdolcomplexExtractor(SankakuExtractor):
     """Base class for idolcomplex extractors"""
     category = "idolcomplex"
+    root = "https://idol.sankakucomplex.com"
     cookies_domain = "idol.sankakucomplex.com"
-    cookies_names = ("login", "pass_hash")
-    root = "https://" + cookies_domain
+    cookies_names = ("_idolcomplex_session",)
     referer = False
-    request_interval = 5.0
+    request_interval = (4.0, 6.0)
 
     def __init__(self, match):
         SankakuExtractor.__init__(self, match)
@@ -32,14 +34,16 @@ class IdolcomplexExtractor(SankakuExtractor):
         self.start_post = 0
 
     def _init(self):
-        self.extags = self.config("tags", False)
+        self.find_tags = re.compile(
+            r'tag-type-([^"]+)">\s*<div [^>]+>\s*<a href="/\?tags=([^"]+)'
+        ).findall
 
     def items(self):
         self.login()
 
         data = self.metadata()
         for post_id in util.advance(self.post_ids(), self.start_post):
-            post = self._parse_post(post_id)
+            post = self._extract_post(post_id)
             url = post["file_url"]
             post.update(data)
             text.nameext_from_url(url, post)
@@ -67,63 +71,75 @@ class IdolcomplexExtractor(SankakuExtractor):
     def _login_impl(self, username, password):
         self.log.info("Logging in as %s", username)
 
-        url = self.root + "/user/authenticate"
+        url = self.root + "/users/login"
+        page = self.request(url).text
+
+        headers = {
+            "Referer": url,
+        }
+        url = self.root + (text.extr(page, '<form action="', '"') or
+                           "/en/user/authenticate")
         data = {
+            "authenticity_token": text.unescape(text.extr(
+                page, 'name="authenticity_token" value="', '"')),
             "url"           : "",
             "user[name]"    : username,
             "user[password]": password,
             "commit"        : "Login",
         }
-        response = self.request(url, method="POST", data=data)
+        response = self.request(url, method="POST", headers=headers, data=data)
 
-        if not response.history or response.url != self.root + "/user/home":
+        if not response.history or response.url.endswith("/user/home"):
             raise exception.AuthenticationError()
-        cookies = response.history[0].cookies
-        return {c: cookies[c] for c in self.cookies_names}
+        return {c.name: c.value for c in response.history[0].cookies}
 
-    def _parse_post(self, post_id):
-        """Extract metadata of a single post"""
-        url = self.root + "/post/show/" + post_id
+    def _extract_post(self, post_id):
+        url = self.root + "/posts/" + post_id
         page = self.request(url, retries=10).text
-        extr = text.extract
+        extr = text.extract_from(page)
 
-        tags   , pos = extr(page, "<title>", " | ")
-        vavg   , pos = extr(page, "itemprop=ratingValue>", "<", pos)
-        vcnt   , pos = extr(page, "itemprop=reviewCount>", "<", pos)
-        _      , pos = extr(page, "Posted: <", "", pos)
-        created, pos = extr(page, ' title="', '"', pos)
-        rating = extr(page, "<li>Rating: ", "<", pos)[0]
+        tags    = extr("<title>", " | ")
+        vavg    = extr('itemprop="ratingValue">', "<")
+        vcnt    = extr('itemprop="reviewCount">', "<")
+        pid     = extr(">Post ID:", "<")
+        created = extr(' title="', '"')
 
-        file_url, pos = extr(page, '<li>Original: <a href="', '"', pos)
+        file_url = extr('>Original:', 'id=')
         if file_url:
-            width , pos = extr(page, '>', 'x', pos)
-            height, pos = extr(page, '', ' ', pos)
+            file_url = extr(' href="', '"')
+            width  = extr(">", "x")
+            height = extr("", " ")
         else:
-            width , pos = extr(page, '<object width=', ' ', pos)
-            height, pos = extr(page, 'height=', '>', pos)
-            file_url = extr(page, '<embed src="', '"', pos)[0]
+            width  = extr('<object width=', ' ')
+            height = extr('height=', '>')
+            file_url = extr('<embed src="', '"')
+
+        rating = extr(">Rating:", "<br")
 
         data = {
-            "id": text.parse_int(post_id),
-            "md5": file_url.rpartition("/")[2].partition(".")[0],
-            "tags": text.unescape(tags),
+            "id"          : text.parse_int(pid),
+            "md5"         : file_url.rpartition("/")[2].partition(".")[0],
+            "tags"        : text.unescape(tags),
             "vote_average": text.parse_float(vavg),
-            "vote_count": text.parse_int(vcnt),
-            "created_at": created,
-            "rating": (rating or "?")[0].lower(),
-            "file_url": "https:" + text.unescape(file_url),
-            "width": text.parse_int(width),
-            "height": text.parse_int(height),
+            "vote_count"  : text.parse_int(vcnt),
+            "created_at"  : created,
+            "date"        : text.parse_datetime(
+                created, "%Y-%m-%d %H:%M:%S.%f"),
+            "rating"      : text.remove_html(rating).lower(),
+            "file_url"    : "https:" + text.unescape(file_url),
+            "width"       : text.parse_int(width),
+            "height"      : text.parse_int(height),
        }
 
-        if self.extags:
-            tags = collections.defaultdict(list)
-            tags_html = text.extr(page, '<ul id=tag-sidebar>', '</ul>')
-            pattern = re.compile(r'tag-type-([^>]+)><a href="/\?tags=([^"]+)')
-            for tag_type, tag_name in pattern.findall(tags_html or ""):
-                tags[tag_type].append(text.unquote(tag_name))
-            for key, value in tags.items():
-                data["tags_" + key] = " ".join(value)
+        tags = collections.defaultdict(list)
+        tags_list = []
+        tags_html = text.extr(page, '<ul id="tag-sidebar"', '</ul>')
+        for tag_type, tag_name in self.find_tags(tags_html or ""):
+            tags[tag_type].append(text.unquote(tag_name))
+        for key, value in tags.items():
+            data["tags_" + key] = " ".join(value)
+            tags_list += value
+        data["tags"] = " ".join(tags_list)
 
         return data
 
@@ -178,15 +194,16 @@ class IdolcomplexTagExtractor(IdolcomplexExtractor):
         while True:
             page = self.request(self.root, params=params, retries=10).text
 
-            pos = page.find("<div id=more-popular-posts-link>") + 1
-            yield from text.extract_iter(page, '" id=p', '>', pos)
+            pos = ((page.find('id="more-popular-posts-link"') + 1) or
+                   (page.find('<span class="thumb') + 1))
+            yield from text.extract_iter(page, ' href="/posts/', '"', pos)
 
             next_url = text.extract(page, 'next-page-url="', '"', pos)[0]
             if not next_url:
                 return
 
-            next_params = text.parse_query(text.unescape(
-                next_url).lstrip("?/"))
+            next_params = text.parse_query(text.unescape(text.unescape(
+                next_url).lstrip("?/")))
 
             if "next" in next_params:
                 # stop if the same "next" value occurs twice in a row (#265)
@@ -201,8 +218,8 @@ class IdolcomplexPoolExtractor(IdolcomplexExtractor):
     subcategory = "pool"
     directory_fmt = ("{category}", "pool", "{pool}")
     archive_fmt = "p_{pool}_{id}"
-    pattern = r"(?:https?://)?idol\.sankakucomplex\.com/pool/show/(\d+)"
-    example = "https://idol.sankakucomplex.com/pool/show/12345"
+    pattern = r"(?:https?://)?idol\.sankakucomplex\.com/pools?/show/(\d+)"
+    example = "https://idol.sankakucomplex.com/pools/show/12345"
     per_page = 24
 
     def __init__(self, match):
@@ -219,15 +236,17 @@ class IdolcomplexPoolExtractor(IdolcomplexExtractor):
         return {"pool": self.pool_id}
 
     def post_ids(self):
-        url = self.root + "/pool/show/" + self.pool_id
+        url = self.root + "/pools/show/" + self.pool_id
         params = {"page": self.start_page}
 
         while True:
             page = self.request(url, params=params, retries=10).text
-            ids = list(text.extract_iter(page, '" id=p', '>'))
+            pos = page.find('id="pool-show"') + 1
+            post_ids = list(text.extract_iter(
+                page, ' href="/posts/', '"', pos))
 
-            yield from ids
-            if len(ids) < self.per_page:
+            yield from post_ids
+            if len(post_ids) < self.per_page:
                 return
             params["page"] += 1
 
@@ -236,8 +255,8 @@ class IdolcomplexPostExtractor(IdolcomplexExtractor):
     """Extractor for single images from idol.sankakucomplex.com"""
     subcategory = "post"
     archive_fmt = "{id}"
-    pattern = r"(?:https?://)?idol\.sankakucomplex\.com/post/show/(\d+)"
-    example = "https://idol.sankakucomplex.com/post/show/12345"
+    pattern = BASE_PATTERN + r"/posts?/(?:show/)?([0-9a-f]+)"
+    example = "https://idol.sankakucomplex.com/posts/0123456789abcdef"
 
     def __init__(self, match):
         IdolcomplexExtractor.__init__(self, match)
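For context on the tag-extraction change: the patch drops the opt-in "extags" branch and always runs a compiled find_tags regex, grouping results by tag type before building the "tags_<type>" and combined "tags" fields. Below is a minimal standalone sketch of that grouping; the sidebar markup (including the "tag-link" class and the tag names) is made up to illustrate the shape the regex expects, not copied from the site, and the real extractor additionally URL-unquotes each name via text.unquote.

import collections
import re

# same pattern the patch compiles into self.find_tags
find_tags = re.compile(
    r'tag-type-([^"]+)">\s*<div [^>]+>\s*<a href="/\?tags=([^"]+)'
).findall

# hypothetical sidebar markup approximating the new quoted-attribute layout
tags_html = """
<li class="tag-type-idol">
  <div class="tag-link">
    <a href="/?tags=some_idol">some_idol</a>
  </div>
</li>
<li class="tag-type-general">
  <div class="tag-link">
    <a href="/?tags=some_tag">some_tag</a>
  </div>
</li>
"""

# group tag names by type, as _extract_post does before writing
# data["tags_<type>"] and the combined data["tags"] string
tags = collections.defaultdict(list)
for tag_type, tag_name in find_tags(tags_html):
    tags[tag_type].append(tag_name)

print(dict(tags))  # {'idol': ['some_idol'], 'general': ['some_tag']}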

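And a quick check of the widened IdolcomplexPostExtractor pattern (the new BASE_PATTERN plus the "/posts?/(?:show/)?" path): it accepts the old numeric /post/show/ URLs, the new hex-ID /posts/ URLs, and an optional two-letter language prefix. The URLs below are taken from, or modeled on, the example URLs in the diff itself.

import re

BASE_PATTERN = r"(?:https?://)?idol\.sankakucomplex\.com(?:/[a-z]{2})?"
post_pattern = re.compile(BASE_PATTERN + r"/posts?/(?:show/)?([0-9a-f]+)")

for url in (
    "https://idol.sankakucomplex.com/post/show/12345",            # old layout
    "https://idol.sankakucomplex.com/posts/0123456789abcdef",     # new layout
    "https://idol.sankakucomplex.com/en/posts/0123456789abcdef",  # language prefix
):
    match = post_pattern.match(url)
    print(match.group(1) if match else "no match")
# -> 12345, 0123456789abcdef, 0123456789abcdef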