aboutsummaryrefslogtreecommitdiffstats
path: root/gallery_dl/extractor/arena.py
blob: ada2fa1d9a712babbd08b516be98cd865078367d (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
# -*- coding: utf-8 -*-

# Copyright 2025 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractor for https://are.na/"""

from .common import GalleryExtractor


class ArenaChannelExtractor(GalleryExtractor):
    """Extractor for are.na channels"""
    category = "arena"
    subcategory = "channel"
    root = "https://are.na"
    directory_fmt = ("{category}", "{user[full_name]} ({user[id]})",
                     "{channel[title]} ({channel[id]})")
    filename_fmt = "{num:>03}{block[id]:? //}.{extension}"
    archive_fmt = "{channel[id]}/{block[id]}"
    pattern = r"(?:https?://)?(?:www\.)?are\.na/[^/?#]+/([^/?#]+)"
    example = "https://are.na/evan-collins-1522646491/cassette-futurism"

    def metadata(self, page):
        """Fetch channel metadata from the are.na v2 API.

        Returns a dict with 'count', 'user', 'owner', and the remaining
        channel object under 'channel'.
        """
        slug = self.groups[0]
        info = self.request_json(
            f"https://api.are.na/v2/channels/{slug}")

        # Convert the API's ISO timestamps into datetime objects
        info["date"] = self.parse_datetime_iso(info["created_at"])
        info["date_updated"] = self.parse_datetime_iso(info["updated_at"])
        # Drop the embedded block list; blocks are paged through in images()
        info.pop("contents", None)

        result = {"count": info.get("length")}
        result["user"] = info.pop("user", None)
        result["owner"] = info.pop("owner", None)
        result["channel"] = info
        return result

    @staticmethod
    def _block_url(block):
        """Return the best downloadable URL of a block, or None.

        Attachments (PDFs, files) take precedence; for images, the
        original file is preferred over 'display' and 'large' variants.
        """
        attachment = block.get("attachment")
        if attachment:
            return attachment.get("url")

        image = block.get("image")
        if image:
            for variant in ("original", "display", "large"):
                version = image.get(variant)
                if version:
                    return version.get("url")

        return None

    def images(self, page):
        """Yield (url, metadata) pairs for every downloadable block."""
        endpoint = f"https://api.are.na/v2/channels/{self.groups[0]}/contents"
        per_page = 100
        params = {"page": 1, "per": per_page}

        while True:
            blocks = self.request_json(endpoint, params=params).get("contents")
            if not blocks:
                return

            for block in blocks:
                url = self._block_url(block)
                if not url:
                    # Links / channel references without downloadable media
                    continue

                block["date"] = self.parse_datetime_iso(
                    block["created_at"])
                block["date_updated"] = self.parse_datetime_iso(
                    block["updated_at"])

                yield url, {
                    "block" : block,
                    "source": block.pop("source", None),
                }

            # A short page means this was the final one
            if len(blocks) < per_page:
                return
            params["page"] += 1