author     Unit 193 <unit193@unit193.net>  2025-07-31 01:22:01 -0400
committer  Unit 193 <unit193@unit193.net>  2025-07-31 01:22:01 -0400
commit     a6e995c093de8aae2e91a0787281bb34c0b871eb (patch)
tree       2d79821b05300d34d8871eb6c9662b359a2de85d /gallery_dl/extractor/reddit.py
parent     7672a750cb74bf31e21d76aad2776367fd476155 (diff)
New upstream version 1.30.2 (tag: upstream/1.30.2)
Diffstat (limited to 'gallery_dl/extractor/reddit.py')
-rw-r--r--  gallery_dl/extractor/reddit.py  44
1 file changed, 19 insertions(+), 25 deletions(-)
diff --git a/gallery_dl/extractor/reddit.py b/gallery_dl/extractor/reddit.py
index 76eadc4..c87430b 100644
--- a/gallery_dl/extractor/reddit.py
+++ b/gallery_dl/extractor/reddit.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-# Copyright 2017-2023 Mike Fährmann
+# Copyright 2017-2025 Mike Fährmann
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 as
@@ -33,8 +33,7 @@ class RedditExtractor(Extractor):
 
         previews = self.config("previews", True)
         embeds = self.config("embeds", True)
-        videos = self.config("videos", True)
-        if videos:
+        if videos := self.config("videos", True):
             if videos == "ytdl":
                 self._extract_video = self._extract_video_ytdl
             elif videos == "dash":
@@ -139,9 +138,8 @@ class RedditExtractor(Extractor):
                     )):
                         continue
 
-                    match = match_submission(url)
-                    if match:
-                        extra.append(match.group(1))
+                    if match := match_submission(url):
+                        extra.append(match[1])
                     elif not match_user(url) and not match_subreddit(url):
                         if previews and "comment" not in data and \
                                 "preview" in data:
@@ -181,8 +179,7 @@ class RedditExtractor(Extractor):
submission["id"], item["media_id"], data.get("status"))
continue
src = data["s"]
- url = src.get("u") or src.get("gif") or src.get("mp4")
- if url:
+ if url := src.get("u") or src.get("gif") or src.get("mp4"):
yield url.partition("?")[0].replace("/preview.", "/i.", 1)
else:
self.log.error(
@@ -202,8 +199,7 @@ class RedditExtractor(Extractor):
submission["id"], mid, data.get("status"))
continue
src = data["s"]
- url = src.get("u") or src.get("gif") or src.get("mp4")
- if url:
+ if url := src.get("u") or src.get("gif") or src.get("mp4"):
yield url.partition("?")[0].replace("/preview.", "/i.", 1)
else:
self.log.error(
@@ -242,8 +238,7 @@ class RedditExtractor(Extractor):
 
         try:
             for image in post["preview"]["images"]:
-                variants = image.get("variants")
-                if variants:
+                if variants := image.get("variants"):
                     if "gif" in variants:
                         yield variants["gif"]["source"]["url"]
                     if "mp4" in variants:
@@ -309,7 +304,7 @@ class RedditSubmissionExtractor(RedditExtractor):
 
     def __init__(self, match):
         RedditExtractor.__init__(self, match)
-        self.submission_id = match.group(1)
+        self.submission_id = match[1]
 
     def submissions(self):
         return (self.api.submission(self.submission_id),)
@@ -326,17 +321,17 @@ class RedditImageExtractor(Extractor):
 
     def __init__(self, match):
         Extractor.__init__(self, match)
-        domain = match.group(1)
-        self.path = match.group(2)
+        domain = match[1]
+        self.path = match[2]
         if domain == "preview.redd.it":
             self.domain = "i.redd.it"
             self.query = ""
         else:
             self.domain = domain
-            self.query = match.group(3) or ""
+            self.query = match[3] or ""
 
     def items(self):
-        url = "https://{}/{}{}".format(self.domain, self.path, self.query)
+        url = f"https://{self.domain}/{self.path}{self.query}"
         data = text.nameext_from_url(url)
         yield Message.Directory, data
         yield Message.Url, url, data
@@ -355,8 +350,7 @@ class RedditRedirectExtractor(Extractor):
         sub_type, subreddit, share_url = self.groups
         if sub_type == "u":
             sub_type = "user"
-        url = "https://www.reddit.com/{}/{}/s/{}".format(
-            sub_type, subreddit, share_url)
+        url = f"https://www.reddit.com/{sub_type}/{subreddit}/s/{share_url}"
         location = self.request_location(url, notfound="submission")
         data = {"_extractor": RedditSubmissionExtractor}
         yield Message.Queue, location, data
@@ -478,8 +472,8 @@ class RedditAPI():
 
         if response.status_code != 200:
             self.log.debug("Server response: %s", data)
-            raise exception.AuthenticationError('"{}: {}"'.format(
-                data.get("error"), data.get("message")))
+            raise exception.AuthenticationError(
+                f"\"{data.get('error')}: {data.get('message')}\"")
         return "Bearer " + data["access_token"]
 
     def _call(self, endpoint, params):
@@ -508,7 +502,8 @@ class RedditAPI():
         try:
             data = response.json()
         except ValueError:
-            raise exception.StopExtraction(text.remove_html(response.text))
+            raise exception.AbortExtraction(
+                text.remove_html(response.text))
 
         if "error" in data:
             if data["error"] == 403:
@@ -516,7 +511,7 @@ class RedditAPI():
if data["error"] == 404:
raise exception.NotFoundError()
self.log.debug(data)
- raise exception.StopExtraction(data.get("message"))
+ raise exception.AbortExtraction(data.get("message"))
return data
def _pagination(self, endpoint, params):
@@ -573,8 +568,7 @@ class RedditAPI():
         sid = self.extractor.config(key)
         return self._decode(sid.rpartition("_")[2].lower()) if sid else default
 
-    @staticmethod
-    def _decode(sid):
+    def _decode(self, sid):
         return util.bdecode(sid, "0123456789abcdefghijklmnopqrstuvwxyz")
 
 
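
Most of the hunks above are mechanical Python modernizations: assign-then-test sequences become assignment expressions (":=", Python 3.8+), match.group(N) becomes match[N] (re.Match has supported subscripting since Python 3.6), and str.format() calls become f-strings. The following is a minimal standalone sketch of those equivalences; the regex and the function name extract_submission_id are illustrative stand-ins, not gallery-dl's actual pattern or API:

    import re

    # Illustrative stand-in for RedditSubmissionExtractor.pattern.match
    match_submission = re.compile(
        r"https?://(?:www\.)?reddit\.com/r/\w+/comments/(\w+)").match

    def extract_submission_id(url):
        # Old style:
        #     match = match_submission(url)
        #     if match:
        #         return match.group(1)
        # New style: assignment expression plus Match subscripting
        if match := match_submission(url):
            return match[1]  # same value as match.group(1)
        return None

    # str.format() and the equivalent f-string build the same URL
    domain, path, query = "i.redd.it", "example.jpg", ""
    assert "https://{}/{}{}".format(domain, path, query) == \
        f"https://{domain}/{path}{query}"

    print(extract_submission_id(
        "https://www.reddit.com/r/pics/comments/abc123/some_title/"))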