# -*- coding: utf-8 -*-

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://leakgallery.com"""

from .common import Extractor, Message
from .. import text

BASE_PATTERN = r"(?:https?://)?(?:www\.)?leakgallery\.com"


class LeakgalleryExtractor(Extractor):
    category = "leakgallery"
    directory_fmt = ("{category}", "{creator}")
    filename_fmt = "{id}_{filename}.{extension}"
    archive_fmt = "{creator}_{id}"

    def _yield_media_items(self, medias, creator=None):
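        # convert a list of media dicts into Directory/Url messages,
        # skipping duplicate entries (keyed by "file_path")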
        seen = set()
        for media in medias:
            path = media["file_path"]
            if path in seen:
                continue
            seen.add(path)

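            # fall back to the username from the embedded "profile"
            # object when no creator name was passed in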
            if creator is None:
                try:
                    media["creator"] = \
                        media["profile"]["username"] or "unknown"
                except Exception:
                    media["creator"] = "unknown"
            else:
                media["creator"] = creator

            media["url"] = url = f"https://cdn.leakgallery.com/{path}"
            text.nameext_from_url(url, media)
            yield Message.Directory, media
            yield Message.Url, url, media

    def _pagination(self, kind, base, params=None, creator=None, pnum=1):
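        # request numbered API pages (base URL + page number) until
        # the response is empty or a request fails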
        while True:
            try:
                data = self.request_json(f"{base}{pnum}", params=params)

                if not data:
                    return
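                # some endpoints wrap the media list in a "medias" object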
                if "medias" in data:
                    data = data["medias"]
                    if not data or not isinstance(data, list):
                        return

                yield from self._yield_media_items(data, creator)
                pnum += 1
            except Exception as exc:
                self.log.error("Failed to retrieve %s page %s: %s",
                               kind, pnum, exc)
                return


class LeakgalleryUserExtractor(LeakgalleryExtractor):
    """Extractor for profile posts on leakgallery.com"""
    subcategory = "user"
    pattern = (
        BASE_PATTERN +
        r"/(?!trending-medias|most-liked|random/medias)([^/?#]+)"
        r"(?:/(Photos|Videos|All))?"
        r"(?:/(MostRecent|MostViewed|MostLiked))?/?$"
    )
    example = "https://leakgallery.com/creator"

    def items(self):
        creator, mtype, msort = self.groups
        base = f"https://api.leakgallery.com/profile/{creator}/"
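        # media type and sort order default to "All" / "MostRecent"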
        params = {"type": mtype or "All", "sort": msort or "MostRecent"}
        return self._pagination(creator, base, params, creator)


class LeakgalleryTrendingExtractor(LeakgalleryExtractor):
    """Extractor for trending posts on leakgallery.com"""
    subcategory = "trending"
    pattern = rf"{BASE_PATTERN}/trending-medias(?:/([\w-]+))?"
    example = "https://leakgallery.com/trending-medias/Week"

    def items(self):
        period = self.groups[0] or "Last-Hour"
        base = f"https://api.leakgallery.com/popular/media/{period}/"
        return self._pagination("trending", base)


class LeakgalleryMostlikedExtractor(LeakgalleryExtractor):
    """Extractor for most liked posts on leakgallery.com"""
    subcategory = "mostliked"
    pattern = rf"{BASE_PATTERN}/most-liked"
    example = "https://leakgallery.com/most-liked"

    def items(self):
        base = "https://api.leakgallery.com/most-liked/"
        return self._pagination("most-liked", base)


class LeakgalleryPostExtractor(LeakgalleryExtractor):
    """Extractor for individual posts on leakgallery.com"""
    subcategory = "post"
    pattern = rf"{BASE_PATTERN}/([^/?#]+)/(\d+)"
    example = "https://leakgallery.com/CREATOR/12345"

    def items(self):
        creator, post_id = self.groups
        url = f"https://leakgallery.com/{creator}/{post_id}"

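        # media URLs are extracted directly from the post's HTML page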
        try:
            page = self.request(url).text
            video_urls = text.re(
                r"https://cdn\.leakgallery\.com/content[^/?#]*/"
                r"(?:compressed_)?watermark_[^\"]+\."
                r"(?:mp4|mov|m4a|webm)"
            ).findall(page)
            image_urls = text.re(
                r"https://cdn\.leakgallery\.com/content[^/?#]*/"
                r"watermark_[^\"]+\.(?:jpe?g|png)"
            ).findall(page)

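            # drop duplicate URLs across the video and image matches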
            seen = set()
            for url in video_urls + image_urls:
                if url in seen:
                    continue
                seen.add(url)
                data = {
                    "id": post_id,
                    "creator": creator,
                    "url": url,
                }
                text.nameext_from_url(url, data)
                yield Message.Directory, data
                yield Message.Url, url, data
        except Exception as exc:
            self.log.error("Failed to extract post page %s/%s: %s",
                           creator, post_id, exc)