aboutsummaryrefslogtreecommitdiffstats
path: root/gallery_dl/extractor/bilibili.py
blob: d5c419eb5b662d764b3967908b538b9d806319d0 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.bilibili.com/"""

from .common import Extractor, Message
from .. import text, util, exception


class BilibiliExtractor(Extractor):
    """Common base class shared by all bilibili extractors."""
    category = "bilibili"
    root = "https://www.bilibili.com"
    # randomized per-request delay range (seconds), to go easy on the server
    request_interval = (3.0, 6.0)

    def _init(self):
        # single API helper shared by all requests of this extractor run
        self.api = BilibiliAPI(self)


class BilibiliUserArticlesExtractor(BilibiliExtractor):
    """Extractor for the articles posted by a bilibili user"""
    subcategory = "user-articles"
    pattern = r"(?:https?://)?space\.bilibili\.com/(\d+)/article"
    example = "https://space.bilibili.com/12345/article"

    def items(self):
        # delegate each article to BilibiliArticleExtractor via Queue messages
        user_id = self.groups[0]
        for item in self.api.user_articles(user_id):
            item["_extractor"] = BilibiliArticleExtractor
            opus_url = "{}/opus/{}".format(self.root, item["opus_id"])
            yield Message.Queue, opus_url, item


class BilibiliArticleExtractor(BilibiliExtractor):
    """Extractor for images of a bilibili article"""
    subcategory = "article"
    # NOTE: the '.' in 'bilibili\.com' is now escaped; the previous
    # unescaped 'bilibili.com' matched any character in that position
    pattern = (r"(?:https?://)?"
               r"(?:t\.bilibili\.com|(?:www\.)?bilibili\.com/opus)/(\d+)")
    example = "https://www.bilibili.com/opus/12345"
    directory_fmt = ("{category}", "{username}")
    filename_fmt = "{id}_{num}.{extension}"
    archive_fmt = "{id}_{num}"

    def items(self):
        """Yield one Directory message and one Url message per picture."""
        article = self.api.article(self.groups[0])

        # Flatten the list of module dicts into a single dict keyed by
        # module name, so e.g. 'module_author' can be looked up directly
        modules = {}
        for module in article["detail"]["modules"]:
            del module["module_type"]
            modules.update(module)
        article["detail"]["modules"] = modules

        article["username"] = modules["module_author"]["name"]

        # Collect every embedded picture; paragraphs without image data
        # or with an unexpected structure are skipped (best effort)
        pics = []
        for paragraph in modules["module_content"]["paragraphs"]:
            if "pic" not in paragraph:
                continue
            try:
                pics.extend(paragraph["pic"]["pics"])
            except (KeyError, TypeError):
                pass

        article["count"] = len(pics)
        yield Message.Directory, article
        for article["num"], pic in enumerate(pics, 1):
            url = pic["url"]
            # merge per-picture metadata (size, dimensions, ...) into 'article'
            article.update(pic)
            yield Message.Url, url, text.nameext_from_url(url, article)


class BilibiliAPI():
    """Thin wrapper around bilibili's web-dynamic JSON API."""

    def __init__(self, extractor):
        self.extractor = extractor

    def _call(self, endpoint, params):
        """Perform one API request and return its decoded JSON payload.

        Raises StopExtraction when the API reports a non-zero status code.
        """
        url = "https://api.bilibili.com/x/polymer/web-dynamic/v1" + endpoint
        response = self.extractor.request(url, params=params)
        data = response.json()

        if data["code"]:
            self.extractor.log.debug("Server response: %s", data)
            raise exception.StopExtraction("API request failed")
        return data

    def user_articles(self, user_id):
        """Yield all article items of a user, following pagination."""
        endpoint = "/opus/feed/space"
        params = {"host_mid": user_id}

        while True:
            data = self._call(endpoint, params)

            for item in data["data"]["items"]:
                # remember the last seen ID; it becomes the next page's offset
                params["offset"] = item["opus_id"]
                yield item

            if not data["data"]["has_more"]:
                return

    def article(self, article_id):
        """Fetch an article page and return its INITIAL_STATE data.

        Retries every 300 seconds while a risk-control challenge page
        is served; raises StopExtraction on any other parse failure.
        """
        url = "https://www.bilibili.com/opus/" + article_id

        while True:
            page = self.extractor.request(url).text
            try:
                state = text.extr(
                    page, "window.__INITIAL_STATE__=", "};")
                return util.json_loads(state + "}")
            except Exception:
                if "window._riskdata_" not in page:
                    raise exception.StopExtraction(
                        "%s: Unable to extract INITIAL_STATE data", article_id)
            # risk-control page detected; wait before retrying
            self.extractor.wait(seconds=300)