1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
# -*- coding: utf-8 -*-
# Copyright 2021-2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extractors for https://comicvine.gamespot.com/"""
from .booru import BooruExtractor
from .. import text
import operator
class ComicvineTagExtractor(BooruExtractor):
    """Extractor for a gallery on comicvine.gamespot.com"""
    category = "comicvine"
    subcategory = "tag"
    basecategory = ""
    root = "https://comicvine.gamespot.com"
    per_page = 1000  # images requested per API call
    directory_fmt = ("{category}", "{tag}")
    # include the per-image {filename} so every file in a gallery gets a
    # unique name; a constant like "(unknown)" would make all downloads
    # collide on the same path and overwrite one another
    filename_fmt = "{tag} {filename}.{extension}"
    archive_fmt = "{id}"
    pattern = (r"(?:https?://)?comicvine\.gamespot\.com"
               r"(/([^/?#]+)/(\d+-\d+)/images/.*)")
    example = "https://comicvine.gamespot.com/TAG/123-45/images/"

    def __init__(self, match):
        BooruExtractor.__init__(self, match)
        # path: full URL path; object_name: URL-quoted tag name;
        # object_id: numeric "NN-NN" object identifier
        self.path, self.object_name, self.object_id = match.groups()

    def metadata(self):
        """Return metadata shared by all files of this gallery"""
        return {"tag": text.unquote(self.object_name)}

    def posts(self):
        """Yield image dicts from the site's JSON image-data endpoint"""
        url = self.root + "/js/image-data.json"
        params = {
            # gallery ID scraped from the tag's HTML page
            "images": text.extract(
                self.request(self.root + self.path).text,
                'data-gallery-id="', '"')[0],
            "start" : self.page_start,
            "count" : self.per_page,
            "object": self.object_id,
        }

        while True:
            images = self.request_json(url, params=params)["images"]
            yield from images

            # a short page means there are no further results
            if len(images) < self.per_page:
                return
            params["start"] += self.per_page

    def skip(self, num):
        """Skip the first 'num' results by adjusting the start offset"""
        self.page_start = num
        return num

    # direct URL of the full-size image file within a post dict
    _file_url = operator.itemgetter("original")

    def _prepare(self, post):
        # e.g. "Mon, Jan 01 2024" -> datetime object
        post["date"] = self.parse_datetime(
            post["dateCreated"], "%a, %b %d %Y")
        post["tags"] = [tag["name"] for tag in post["tags"] if tag["name"]]
|