# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://sturdychan.help/"""

from .common import Extractor, Message
from .. import text

BASE_PATTERN = r"(?:https?://)?(?:sturdychan\.help|2chen\.(?:moe|club))"


class _2chenThreadExtractor(Extractor):
    """Extractor for 2chen threads"""
    category = "2chen"
    subcategory = "thread"
    root = "https://sturdychan.help"
    directory_fmt = ("{category}", "{board}", "{thread} {title}")
    filename_fmt = "{time} {filename}.{extension}"
    archive_fmt = "{board}_{thread}_{hash}_{time}"
    pattern = BASE_PATTERN + r"/([^/?#]+)/(\d+)"
    example = "https://sturdychan.help/a/12345/"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.board, self.thread = match.groups()

    def items(self):
        url = "{}/{}/{}".format(self.root, self.board, self.thread)
        page = self.request(url, encoding="utf-8", notfound="thread").text
        data = self.metadata(page)
        yield Message.Directory, data

        for post in self.posts(page):

            url = post["url"]
            if not url:
                continue  # post has no file attached
            if url[0] == "/":
                url = self.root + url  # resolve relative file URL
            post["url"] = url = url.partition("?")[0]  # strip query string

            post.update(data)
            post["time"] = text.parse_int(post["date"].timestamp())
            yield Message.Url, url, text.nameext_from_url(
                post["filename"], post)

    def metadata(self, page):
        """Return thread metadata (board, thread number, title)"""
        board, pos = text.extract(page, 'class="board">/', '/<')
        title = text.extract(page, "<h3>", "</h3>", pos)[0]
        return {
            "board" : board,
            "thread": self.thread,
            "title" : text.unescape(title),
        }

    def posts(self, page):
        """Return iterable with relevant posts"""
        return map(self.parse, text.extract_iter(
            page, 'class="glass media', '</article>'))

    def parse(self, post):
        """Extract metadata fields from a single post's HTML"""
        extr = text.extract_from(post)
        return {
            "name"    : text.unescape(extr("<span>", "</span>")),
            "date"    : text.parse_datetime(
                extr("<time", "<").partition(">")[2],
                "%d %b %Y (%a) %H:%M:%S"
            ),
            "no"      : extr('href="#p', '"'),
            "url"     : extr('</a><a href="', '"'),
            "filename": text.unescape(extr('download="', '"')),
            "hash"    : extr('data-hash="', '"'),
        }


class _2chenBoardExtractor(Extractor):
    """Extractor for 2chen boards"""
    category = "2chen"
    subcategory = "board"
    root = "https://sturdychan.help"
    pattern = BASE_PATTERN + r"/([^/?#]+)(?:/catalog|/?$)"
    example = "https://sturdychan.help/a/"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.board = match.group(1)

    def items(self):
        url = "{}/{}/catalog".format(self.root, self.board)
        page = self.request(url, notfound="board").text
        data = {"_extractor": _2chenThreadExtractor}
        for thread in text.extract_iter(
                page, '<figure><a href="', '"'):
            yield Message.Queue, self.root + thread, data
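
# Illustrative sketch: the thread pattern captures the board name and the
# thread number from a matching URL.
#
#   >>> import re
#   >>> re.match(_2chenThreadExtractor.pattern,
#   ...          "https://sturdychan.help/a/12345/").groups()
#   ('a', '12345')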