# -*- coding: utf-8 -*-

# Copyright 2019-2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://news.sankakucomplex.com/"""

from .common import Extractor, Message
from .. import text, util


class SankakucomplexExtractor(Extractor):
    """Base class for sankakucomplex extractors"""
    category = "sankakucomplex"
    root = "https://news.sankakucomplex.com"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.path = match[1]


class SankakucomplexArticleExtractor(SankakucomplexExtractor):
    """Extractor for articles on news.sankakucomplex.com"""
    subcategory = "article"
    directory_fmt = ("{category}", "{date:%Y-%m-%d} {title}")
    filename_fmt = "{filename}.{extension}"
    archive_fmt = "{date:%Y%m%d}_{filename}"
    pattern = (r"(?:https?://)?(?:news|www)\.sankakucomplex\.com"
               r"/(\d\d\d\d/\d\d/\d\d/[^/?#]+)")
    example = "https://news.sankakucomplex.com/1970/01/01/TITLE"

    def items(self):
        url = f"{self.root}/{self.path}/?pg=X"
        extr = text.extract_from(self.request(url).text)
        data = {
            "title"      : text.unescape(
                extr('property="og:title" content="', '"')),
            "description": text.unescape(
                extr('property="og:description" content="', '"')),
            "date"       : self.parse_datetime_iso(
                extr('property="article:published_time" content="', '"')),
        }
        content = extr('<div class="entry-content">', '</article>')
        data["tags"] = text.split_html(extr('="meta-tags">', '</div>'))[::2]

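        # collect file URLs; videos are included by default,
        # embeds only when enabled in the configuration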
        files = self._extract_images(content)
        if self.config("videos", True):
            files += self._extract_videos(content)
        if self.config("embeds", False):
            files += self._extract_embeds(content)
        data["count"] = len(files)

        yield Message.Directory, "", data
        for num, url in enumerate(files, 1):
            file = text.nameext_from_url(url)
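            # turn root-relative paths into absolute URLs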
            if url[0] == "/":
                url = text.urljoin(self.root, url)
            file["url"] = url
            file["num"] = num
            file.update(data)
            yield Message.Url, url, file

    def _extract_images(self, content):
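        # deduplicated 'data-lazy-src' URLs with the
        # '-<width>x<height>' thumbnail suffix stripped off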
        orig_sub = text.re(r"-\d+x\d+\.").sub
        return [
            orig_sub(".", url) for url in
            util.unique(text.extract_iter(content, 'data-lazy-src="', '"'))
        ]

    def _extract_videos(self, content):
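        # 'src' attribute of every <source> element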
        return text.re(r"<source [^>]*src=[\"']([^\"']+)").findall(content)

    def _extract_embeds(self, content):
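        # <iframe> URLs prefixed with 'ytdl:' to defer them
        # to the ytdl integration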
        return [
            "ytdl:" + url for url in
            text.re(r"<iframe [^>]*src=[\"']([^\"']+)").findall(content)
        ]


class SankakucomplexTagExtractor(SankakucomplexExtractor):
    """Extractor for sankakucomplex blog articles by tag or author"""
    subcategory = "tag"
    pattern = (r"(?:https?://)?(?:news|www)\.sankakucomplex\.com"
               r"/((?:tag|category|author)/[^/?#]+)")
    example = "https://news.sankakucomplex.com/tag/TAG/"

    def items(self):
        pnum = 1
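        # have queued URLs handled by the article extractor directly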
        data = {"_extractor": SankakucomplexArticleExtractor}

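        # paginate until a request for the next page fails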
        while True:
            url = f"{self.root}/{self.path}/page/{pnum}/"
            response = self.request(url, fatal=False)
            if response.status_code >= 400:
                return
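            # queue unique article URLs taken from 'data-direct' attributes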
            for url in util.unique_sequence(text.extract_iter(
                    response.text, 'data-direct="', '"')):
                yield Message.Queue, url, data
            pnum += 1