author     Unit 193 <unit193@unit193.net>  2024-06-22 21:19:42 -0400
committer  Unit 193 <unit193@unit193.net>  2024-06-22 21:19:42 -0400
commit     408cf4d7719de63b8175bd9b69a7484bc6dba1f8 (patch)
tree       eb6ec27c12186e38a97673ec9a1332853aa44175
parent     265fafbf1b3d468d4087a5229ca4bad0a651db23 (diff)
parent     80e39a8fc7de105510cbbdca8507f2a4b8c9e01d (diff)
Update upstream source from tag 'upstream/1.27.1'
Update to upstream version '1.27.1' with Debian dir 786b4d48b451c0be3e4e14b3739202bb3965566e
-rw-r--r--  CHANGELOG.md                          | 128
-rw-r--r--  PKG-INFO                              |   6
-rw-r--r--  README.rst                            |   4
-rw-r--r--  data/completion/_gallery-dl           |  25
-rw-r--r--  data/completion/gallery-dl            |   2
-rw-r--r--  data/completion/gallery-dl.fish       |  25
-rw-r--r--  data/man/gallery-dl.1                 |  77
-rw-r--r--  data/man/gallery-dl.conf.5            | 346
-rw-r--r--  gallery_dl.egg-info/PKG-INFO          |   6
-rw-r--r--  gallery_dl.egg-info/SOURCES.txt       |   3
-rw-r--r--  gallery_dl/__init__.py                |   9
-rw-r--r--  gallery_dl/config.py                  |  76
-rw-r--r--  gallery_dl/cookies.py                 |   8
-rw-r--r--  gallery_dl/downloader/text.py         |   4
-rw-r--r--  gallery_dl/downloader/ytdl.py         |   5
-rw-r--r--  gallery_dl/extractor/__init__.py      |   2
-rw-r--r--  gallery_dl/extractor/cien.py          |  86
-rw-r--r--  gallery_dl/extractor/common.py        |  33
-rw-r--r--  gallery_dl/extractor/deviantart.py    |  11
-rw-r--r--  gallery_dl/extractor/fanbox.py        |  12
-rw-r--r--  gallery_dl/extractor/hentainexus.py   | 176
-rw-r--r--  gallery_dl/extractor/hitomi.py        |   1
-rw-r--r--  gallery_dl/extractor/instagram.py     |   2
-rw-r--r--  gallery_dl/extractor/kemonoparty.py   |   6
-rw-r--r--  gallery_dl/extractor/newgrounds.py    |  68
-rw-r--r--  gallery_dl/extractor/nijie.py         |  11
-rw-r--r--  gallery_dl/extractor/nitter.py        |  20
-rw-r--r--  gallery_dl/extractor/oauth.py         |   6
-rw-r--r--  gallery_dl/extractor/philomena.py     |   7
-rw-r--r--  gallery_dl/extractor/photobucket.py   | 145
-rw-r--r--  gallery_dl/extractor/shimmie2.py      |   4
-rw-r--r--  gallery_dl/extractor/skeb.py          |  19
-rw-r--r--  gallery_dl/extractor/speakerdeck.py   |  44
-rw-r--r--  gallery_dl/extractor/szurubooru.py    |  12
-rw-r--r--  gallery_dl/extractor/tcbscans.py      |  19
-rw-r--r--  gallery_dl/extractor/twibooru.py      |   7
-rw-r--r--  gallery_dl/extractor/twitter.py       |  30
-rw-r--r--  gallery_dl/extractor/vichan.py        |   4
-rw-r--r--  gallery_dl/formatter.py               |  37
-rw-r--r--  gallery_dl/option.py                  | 164
-rw-r--r--  gallery_dl/output.py                  |   1
-rw-r--r--  gallery_dl/postprocessor/ugoira.py    |   9
-rw-r--r--  gallery_dl/util.py                    |  15
-rw-r--r--  gallery_dl/version.py                 |   2
-rw-r--r--  setup.py                              |   4
-rw-r--r--  test/test_config.py                   |  12
-rw-r--r--  test/test_cookies.py                  |   8
-rw-r--r--  test/test_downloader.py               |   8
-rw-r--r--  test/test_formatter.py                |  12
-rw-r--r--  test/test_results.py                  |   1
-rw-r--r--  test/test_util.py                     |  12
51 files changed, 987 insertions(+), 747 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 91eef9d..ebede9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,104 +1,40 @@
-## 1.27.0 - 2024-06-01
+## 1.27.1 - 2024-06-22
### Extractors
#### Additions
-- [mastodon] add `favorite`, `list`, and `hashtag` extractors ([#5529](https://github.com/mikf/gallery-dl/issues/5529))
-- [mastodon] add support for card images
-- [pixeldrain] add support for single-file album downloads ([#5641](https://github.com/mikf/gallery-dl/issues/5641))
-- [reddit] support comment embeds ([#5366](https://github.com/mikf/gallery-dl/issues/5366))
-- [seiga] re-implement login with username & password
-- [tapas] add `creator` extractor ([#5306](https://github.com/mikf/gallery-dl/issues/5306))
-- [vsco] add `avatar` extractor ([#5341](https://github.com/mikf/gallery-dl/issues/5341))
-- [wikimedia] support `wiki.gg` wikis
+- [hentainexus] restore module ([#5275](https://github.com/mikf/gallery-dl/issues/5275), [#5712](https://github.com/mikf/gallery-dl/issues/5712))
+- [shimmie2] support `vidya.pics` ([#5632](https://github.com/mikf/gallery-dl/issues/5632))
+- [tcbscans] support other domains ([#5774](https://github.com/mikf/gallery-dl/issues/5774))
#### Fixes
-- [4archive] fix extraction
-- [8chan] fix file downloads by sending a `TOS` cookie ([#5578](https://github.com/mikf/gallery-dl/issues/5578))
-- [artstation] disable TLS 1.2 ciphers by default ([#5564](https://github.com/mikf/gallery-dl/issues/5564), [#5658](https://github.com/mikf/gallery-dl/issues/5658))
-- [bluesky] filter reposts only for user timelines ([#5528](https://github.com/mikf/gallery-dl/issues/5528))
-- [common] disable `check_hostname` for custom SSLContexts ([#3614](https://github.com/mikf/gallery-dl/issues/3614), [#4891](https://github.com/mikf/gallery-dl/issues/4891), [#5576](https://github.com/mikf/gallery-dl/issues/5576))
-- [exhentai] fix Multi-Page Viewer detection ([#4969](https://github.com/mikf/gallery-dl/issues/4969))
-- [exhentai] fix blank page detection
-- [hiperdex] update domain to `hiperdex.top` ([#5635](https://github.com/mikf/gallery-dl/issues/5635))
-- [hotleak] download files returning a 404 status code ([#5395](https://github.com/mikf/gallery-dl/issues/5395))
-- [imgur] match URLs with title slugs ([#5593](https://github.com/mikf/gallery-dl/issues/5593))
-- [kemonoparty] fix `KeyError - 'path'` for posts without files ([#5368](https://github.com/mikf/gallery-dl/issues/5368), [#5394](https://github.com/mikf/gallery-dl/issues/5394), [#5422](https://github.com/mikf/gallery-dl/issues/5422), [#5488](https://github.com/mikf/gallery-dl/issues/5488))
-- [kemonoparty] fix crash on posts with missing datetime info ([#5422](https://github.com/mikf/gallery-dl/issues/5422))
-- [mastodon] send canonical `true`/`false` boolean values ([#5516](https://github.com/mikf/gallery-dl/issues/5516))
-- [newgrounds] update and fix login procedure ([#5109](https://github.com/mikf/gallery-dl/issues/5109))
-- [patreon] fix `bootstrap` data extraction ([#5624](https://github.com/mikf/gallery-dl/issues/5624))
-- [poipiku] fix downloading R-18 posts ([#5567](https://github.com/mikf/gallery-dl/issues/5567))
-- [poipoku] avoid language-specific extraction ([#5590](https://github.com/mikf/gallery-dl/issues/5590), [#5591](https://github.com/mikf/gallery-dl/issues/5591))
-- [realbooru] fix videos and provide fallback URLs ([#2530](https://github.com/mikf/gallery-dl/issues/2530))
-- [slideshare] fix extraction
-- [subscribestar] fix file URLs ([#5631](https://github.com/mikf/gallery-dl/issues/5631))
-- [twitter] update domain to `x.com` ([#5597](https://github.com/mikf/gallery-dl/issues/5597))
-- [twitter] transfer `twitter.com` cookies to `x.com` ([#5597](https://github.com/mikf/gallery-dl/issues/5597))
-- [twitter] prevent crash when extracting `birdwatch` metadata ([#5403](https://github.com/mikf/gallery-dl/issues/5403))
-- [twitter] handle missing `expanded_url` fields ([#5463](https://github.com/mikf/gallery-dl/issues/5463), [#5490](https://github.com/mikf/gallery-dl/issues/5490))
-- [wikimedia] suppress exception for entries without `imageinfo` ([#5384](https://github.com/mikf/gallery-dl/issues/5384))
-- [wikimedia] fix exception for files with empty `metadata`
+- [deviantart] fix watching module ID extraction ([#5696](https://github.com/mikf/gallery-dl/issues/5696), [#5772](https://github.com/mikf/gallery-dl/issues/5772))
+- [fanbox] handle KeyError for no longer existing plans ([#5759](https://github.com/mikf/gallery-dl/issues/5759))
+- [kemonoparty:favorite] fix exception when sorting `null` objects ([#5692](https://github.com/mikf/gallery-dl/issues/5692), [#5721](https://github.com/mikf/gallery-dl/issues/5721))
+- [skeb] fix `429 Too Many Requests` errors ([#5766](https://github.com/mikf/gallery-dl/issues/5766))
+- [speakerdeck] fix extraction ([#5730](https://github.com/mikf/gallery-dl/issues/5730))
+- [twitter] fix duplicate `ArkoseLogin` check
#### Improvements
-- [exhentai] detect CAPTCHAs during login ([#5492](https://github.com/mikf/gallery-dl/issues/5492))
-- [foolfuuka] improve `board` pattern & support pages ([#5408](https://github.com/mikf/gallery-dl/issues/5408))
-- [furaffinity] match `fxfuraffinity.net`/`fxraffinity.net`/`xfuraffinity.net` URLs ([#5511](https://github.com/mikf/gallery-dl/issues/5511), [#5568](https://github.com/mikf/gallery-dl/issues/5568))
-- [gelbooru] improve pagination logic for meta tags ([#5478](https://github.com/mikf/gallery-dl/issues/5478))
-- [kemonoparty:favorite] return artists/posts in native order and support `sort` and `order` query parameters ([#5375](https://github.com/mikf/gallery-dl/issues/5375), [#5620](https://github.com/mikf/gallery-dl/issues/5620))
-- [oauth] use `Extractor.request()` for HTTP requests to support proxy servers etc ([#5433](https://github.com/mikf/gallery-dl/issues/5433))
-- [pixiv] change `sanity_level` debug message to a warning ([#5180](https://github.com/mikf/gallery-dl/issues/5180))
-- [twitter] improve username & password login procedure ([#5445](https://github.com/mikf/gallery-dl/issues/5445))
-- [twitter] wait for rate limit reset before encountering a 429 error ([#5532](https://github.com/mikf/gallery-dl/issues/5532))
-- [twitter] match `fixvx.com` URLs ([#5511](https://github.com/mikf/gallery-dl/issues/5511))
-- [twitter] match Tweet URLs with query parameters ([#5371](https://github.com/mikf/gallery-dl/issues/5371), [#5372](https://github.com/mikf/gallery-dl/issues/5372))
-- [twitter] match `/photo/` and `/video/` Tweet URLs ([#5443](https://github.com/mikf/gallery-dl/issues/5443), [#5601](https://github.com/mikf/gallery-dl/issues/5601))
+- [nijie] support downloading videos ([#5707](https://github.com/mikf/gallery-dl/issues/5707), [#5617](https://github.com/mikf/gallery-dl/issues/5617))
+- [philomena] support downloading `.svg` files ([#5643](https://github.com/mikf/gallery-dl/issues/5643))
+- [szurubooru] support empty tag searches ([#5711](https://github.com/mikf/gallery-dl/issues/5711))
+- [twitter] ignore `Unavailable` media ([#5736](https://github.com/mikf/gallery-dl/issues/5736))
+#### Metadata
+- [hitomi] extract `title_jpn` metadata ([#5706](https://github.com/mikf/gallery-dl/issues/5706))
+- [instagram] extract `liked` metadata ([#5609](https://github.com/mikf/gallery-dl/issues/5609))
#### Options
-- [common] add `sleep-429` option ([#5160](https://github.com/mikf/gallery-dl/issues/5160))
-- [common] implement `skip-filter` option ([#5255](https://github.com/mikf/gallery-dl/issues/5255))
-- [common] implement `keywords-eval` option ([#5621](https://github.com/mikf/gallery-dl/issues/5621))
-- [kemonoparty] add `announcements` option ([#5262](https://github.com/mikf/gallery-dl/issues/5262))
-- [pixiv:novel] add `covers` option ([#5373](https://github.com/mikf/gallery-dl/issues/5373))
-- [twitter] implement `relogin` option ([#5445](https://github.com/mikf/gallery-dl/issues/5445))
+- [newgrounds] extend `format` option ([#5709](https://github.com/mikf/gallery-dl/issues/5709))
+- [twitter] extend `ratelimit` option ([#5532](https://github.com/mikf/gallery-dl/issues/5532))
+- [twitter] add `username-alt` option ([#5715](https://github.com/mikf/gallery-dl/issues/5715))
+#### Removals
+- [photobucket] remove module
+- [nitter] remove instances
+- [vichan] remove `wikieat.club`
### Downloaders
-- [http] add MIME type and signature for `.m4v` files ([#5505](https://github.com/mikf/gallery-dl/issues/5505))
-### Post Processors
-- [mtime] do not overwrite `_mtime` values with `None` ([#5439](https://github.com/mikf/gallery-dl/issues/5439))
-- [ugoira] log errors for general exceptions
-### Archives
-- [archive] move DownloadArchive code into its own module
-- [archive] implement `DownloadArchiveMemory` class ([#5255](https://github.com/mikf/gallery-dl/issues/5255))
-- [archive] add `archive-mode` option ([#5255](https://github.com/mikf/gallery-dl/issues/5255))
-### Cookies
-- [cookies] use temporary file when saving cookies.txt files ([#5461](https://github.com/mikf/gallery-dl/issues/5461))
-- [cookies] optimize `_find_most_recently_used_file()` for exact profiles ([#5538](https://github.com/mikf/gallery-dl/issues/5538))
-- [cookies] set proper `expires` value for Chrome session cookies
-### Documentation
-- [docs] update docs/configuration links ([#5059](https://github.com/mikf/gallery-dl/issues/5059), [#5369](https://github.com/mikf/gallery-dl/issues/5369), [#5423](https://github.com/mikf/gallery-dl/issues/5423))
-- [docs] update link to "nightly" builds ([#5618](https://github.com/mikf/gallery-dl/issues/5618))
-- [docs] replace AnchorJS with custom script
-- [docs] update defaults of `sleep-request`, `browser`, `tls12`
-- [docs] complete Authentication info in docs/supportedsites
+- [ytdl] fix exception due to missing `ext` in unavailable videos ([#5675](https://github.com/mikf/gallery-dl/issues/5675))
### Formatter
-- [formatter] allow dots in `'...'` literals ([#5539](https://github.com/mikf/gallery-dl/issues/5539))
-### Output
-- [output] enable colored output by default
-- [output] extend `output.colors` ([#2566](https://github.com/mikf/gallery-dl/issues/2566))
-- [output] support `NO_COLOR` environment variable
-- [output] add `--no-colors` command-line option
-- [output] add `-w/--warning` command-line option ([#5474](https://github.com/mikf/gallery-dl/issues/5474))
-### Tests
-- [tests] select unused port number for local HTTP server
-- [tests] allow filtering extractor result tests by URL or comment
-- [tests] mark tests with missing auth as `only_matching`
-### Update
-- implement update-related command-line options ([#5233](https://github.com/mikf/gallery-dl/issues/5233))
- - `-U`/`--update` updates an executable file to the latest release
- - `--update-check` checks if the local version is up to date
- - `--update-to` allows switching to a different release channel (`stable` or `dev`)
- as well as upgrading/downgrading to a specific tag.
- - `--update-to dev`
- - `--update-to dev@2024.05.25`
- - `--update-to v1.25.2`
- - (non-executable installations have only access to `-U`/`--update-check` for version checks)
+- implement `C` format specifier ([#5647](https://github.com/mikf/gallery-dl/issues/5647))
+- implement `X` format specifier ([#5770](https://github.com/mikf/gallery-dl/issues/5770))
+### Options
+- add `--no-input` command-line and `input` config option ([#5733](https://github.com/mikf/gallery-dl/issues/5733))
+- add `--config-open` command-line option ([#5713](https://github.com/mikf/gallery-dl/issues/5713))
+- add `--config-status` command-line option ([#5713](https://github.com/mikf/gallery-dl/issues/5713))
### Miscellaneous
-- add workaround for requests 2.32.3 issues ([#5665](https://github.com/mikf/gallery-dl/issues/5665))
-- fix exit status of `--clear-cache`/`--list-extractors`/`--list-modules`
-- restore `LD_LIBRARY_PATH` for executables built with PyInstaller ([#5421](https://github.com/mikf/gallery-dl/issues/5421))
-- store `match` and `groups` values in Extractor objects
+- [actions] fix exception when `msg` is not a string ([#5683](https://github.com/mikf/gallery-dl/issues/5683))
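The changelog entries above introduce several new user-facing options. A minimal sketch of a gallery-dl configuration exercising three of them, written as a small Python script (the option names come from the man-page changes further down in this diff; the concrete values and the account identifier are illustrative assumptions):

    import json
    import os

    config = {
        "extractor": {
            "twitter": {
                # extended `ratelimit` option: wait a fixed number of
                # seconds ("wait:N") instead of waiting for the reset
                "ratelimit": "wait:60",
                # new `username-alt` option: alternate login identifier
                # (username, email, or phone number) -- placeholder value
                "username-alt": "user@example.org",
            },
            "newgrounds": {
                # `format` now also accepts a list of preferred
                # extensions/resolutions, tried in order
                "format": ["mp4", "mov", "1080p", "720p"],
            },
        },
    }

    # default user configuration path on Linux
    path = os.path.expanduser("~/.config/gallery-dl/config.json")
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(config, fp, indent=4)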
diff --git a/PKG-INFO b/PKG-INFO
index 4b15a5f..a06aa55 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: gallery_dl
-Version: 1.27.0
+Version: 1.27.1
Summary: Command-line program to download image galleries and collections from several image hosting sites
Home-page: https://github.com/mikf/gallery-dl
Download-URL: https://github.com/mikf/gallery-dl/releases/latest
@@ -112,9 +112,9 @@ Standalone Executable
Prebuilt executable files with a Python interpreter and
required Python packages included are available for
-- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.exe>`__
+- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.exe>`__
(Requires `Microsoft Visual C++ Redistributable Package (x86) <https://aka.ms/vs/17/release/vc_redist.x86.exe>`__)
-- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.bin>`__
+- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.bin>`__
Nightly Builds
diff --git a/README.rst b/README.rst
index ce5d504..260d0f4 100644
--- a/README.rst
+++ b/README.rst
@@ -72,9 +72,9 @@ Standalone Executable
Prebuilt executable files with a Python interpreter and
required Python packages included are available for
-- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.exe>`__
+- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.exe>`__
(Requires `Microsoft Visual C++ Redistributable Package (x86) <https://aka.ms/vs/17/release/vc_redist.x86.exe>`__)
-- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.bin>`__
+- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.bin>`__
Nightly Builds
diff --git a/data/completion/_gallery-dl b/data/completion/_gallery-dl
index 81466c9..14b7321 100644
--- a/data/completion/_gallery-dl
+++ b/data/completion/_gallery-dl
@@ -7,18 +7,19 @@ local rc=1
_arguments -s -S \
{-h,--help}'[Print this help message and exit]' \
--version'[Print program version and exit]' \
-{-U,--update-check}'[Check if a newer version is available]' \
{-f,--filename}'[Filename format string for downloaded files ('\''/O'\'' for "original" filenames)]':'<format>' \
{-d,--destination}'[Target location for file downloads]':'<path>' \
{-D,--directory}'[Exact location for file downloads]':'<path>' \
{-X,--extractors}'[Load external extractors from PATH]':'<path>' \
---proxy'[Use the specified proxy]':'<url>' \
---source-address'[Client-side IP address to bind to]':'<ip>' \
--user-agent'[User-Agent request header]':'<ua>' \
--clear-cache'[Delete cached login sessions, cookies, etc. for MODULE (ALL to delete everything)]':'<module>' \
+{-U,--update}'[Update to the latest version]' \
+--update-to'[Switch to a different release channel (stable or dev) or upgrade/downgrade to a specific version]':'<channel[@tag]>' \
+--update-check'[Check if a newer version is available]' \
{-i,--input-file}'[Download URLs found in FILE ('\''-'\'' for stdin). More than one --input-file can be specified]':'<file>':_files \
{-I,--input-file-comment}'[Download URLs found in FILE. Comment them out after they were downloaded successfully.]':'<file>':_files \
{-x,--input-file-delete}'[Download URLs found in FILE. Delete them after they were downloaded successfully.]':'<file>':_files \
+--no-input'[Do not prompt for passwords/tokens]' \
{-q,--quiet}'[Activate quiet mode]' \
{-w,--warning}'[Print only warnings and errors]' \
{-v,--verbose}'[Print various debugging information]' \
@@ -35,26 +36,27 @@ _arguments -s -S \
--write-unsupported'[Write URLs, which get emitted by other extractors but cannot be handled, to FILE]':'<file>':_files \
--write-pages'[Write downloaded intermediary pages to files in the current directory to debug problems]' \
--no-colors'[Do not emit ANSI color codes in output]' \
-{-r,--limit-rate}'[Maximum download rate (e.g. 500k or 2.5M)]':'<rate>' \
{-R,--retries}'[Maximum number of retries for failed HTTP requests or -1 for infinite retries (default: 4)]':'<n>' \
--http-timeout'[Timeout for HTTP connections (default: 30.0)]':'<seconds>' \
+--proxy'[Use the specified proxy]':'<url>' \
+--source-address'[Client-side IP address to bind to]':'<ip>' \
+--no-check-certificate'[Disable HTTPS certificate validation]' \
+{-r,--limit-rate}'[Maximum download rate (e.g. 500k or 2.5M)]':'<rate>' \
+--chunk-size'[Size of in-memory data chunks (default: 32k)]':'<size>' \
--sleep'[Number of seconds to wait before each download. This can be either a constant value or a range (e.g. 2.7 or 2.0-3.5)]':'<seconds>' \
--sleep-request'[Number of seconds to wait between HTTP requests during data extraction]':'<seconds>' \
--sleep-extractor'[Number of seconds to wait before starting data extraction for an input URL]':'<seconds>' \
---filesize-min'[Do not download files smaller than SIZE (e.g. 500k or 2.5M)]':'<size>' \
---filesize-max'[Do not download files larger than SIZE (e.g. 500k or 2.5M)]':'<size>' \
---chunk-size'[Size of in-memory data chunks (default: 32k)]':'<size>' \
--no-part'[Do not use .part files]' \
--no-skip'[Do not skip downloads; overwrite existing files]' \
--no-mtime'[Do not set file modification times according to Last-Modified HTTP response headers]' \
--no-download'[Do not download any files]' \
---no-postprocessors'[Do not run any post processors]' \
---no-check-certificate'[Disable HTTPS certificate validation]' \
{-o,--option}'[Additional options. Example: -o browser=firefox]':'<key=value>' \
{-c,--config}'[Additional configuration files]':'<file>':_files \
--config-yaml'[Additional configuration files in YAML format]':'<file>':_files \
--config-toml'[Additional configuration files in TOML format]':'<file>':_files \
--config-create'[Create a basic configuration file]' \
+--config-status'[Show configuration file status]' \
+--config-open'[Open configuration file in external application]' \
--config-ignore'[Do not read default configuration files]' \
{-u,--username}'[Username to login with]':'<user>' \
{-p,--password}'[Password belonging to the given username]':'<pass>' \
@@ -62,14 +64,17 @@ _arguments -s -S \
{-C,--cookies}'[File to load additional cookies from]':'<file>':_files \
--cookies-export'[Export session cookies to FILE]':'<file>':_files \
--cookies-from-browser'[Name of the browser to load cookies from, with optional domain prefixed with '\''/'\'', keyring name prefixed with '\''+'\'', profile prefixed with '\'':'\'', and container prefixed with '\''::'\'' ('\''none'\'' for no container)]':'<browser[/domain][+keyring][:profile][::container]>' \
---download-archive'[Record all downloaded or skipped files in FILE and skip downloading any file already in it]':'<file>':_files \
{-A,--abort}'[Stop current extractor run after N consecutive file downloads were skipped]':'<n>' \
{-T,--terminate}'[Stop current and parent extractor runs after N consecutive file downloads were skipped]':'<n>' \
+--filesize-min'[Do not download files smaller than SIZE (e.g. 500k or 2.5M)]':'<size>' \
+--filesize-max'[Do not download files larger than SIZE (e.g. 500k or 2.5M)]':'<size>' \
+--download-archive'[Record all downloaded or skipped files in FILE and skip downloading any file already in it]':'<file>':_files \
--range'[Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. '\''5'\'', '\''8-20'\'', or '\''1:24:3'\'')]':'<range>' \
--chapter-range'[Like '\''--range'\'', but applies to manga chapters and other delegated URLs]':'<range>' \
--filter'[Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by '\''-K'\''. Example: --filter "image_width >= 1000 and rating in ('\''s'\'', '\''q'\'')"]':'<expr>' \
--chapter-filter'[Like '\''--filter'\'', but applies to manga chapters and other delegated URLs]':'<expr>' \
{-P,--postprocessor}'[Activate the specified post processor]':'<name>' \
+--no-postprocessors'[Do not run any post processors]' \
{-O,--postprocessor-option}'[Additional post processor options]':'<key=value>' \
--write-metadata'[Write metadata to separate JSON files]' \
--write-info-json'[Write gallery metadata to an info.json file]'
diff --git a/data/completion/gallery-dl b/data/completion/gallery-dl
index 81a5238..625ecd6 100644
--- a/data/completion/gallery-dl
+++ b/data/completion/gallery-dl
@@ -10,7 +10,7 @@ _gallery_dl()
elif [[ "${prev}" =~ ^()$ ]]; then
COMPREPLY=( $(compgen -d -- "${cur}") )
else
- COMPREPLY=( $(compgen -W "--help --version --update-check --filename --destination --directory --extractors --proxy --source-address --user-agent --clear-cache --input-file --input-file-comment --input-file-delete --quiet --warning --verbose --get-urls --resolve-urls --dump-json --simulate --extractor-info --list-keywords --error-file --list-modules --list-extractors --write-log --write-unsupported --write-pages --no-colors --limit-rate --retries --http-timeout --sleep --sleep-request --sleep-extractor --filesize-min --filesize-max --chunk-size --no-part --no-skip --no-mtime --no-download --no-postprocessors --no-check-certificate --option --config --config-yaml --config-toml --config-create --config-ignore --ignore-config --username --password --netrc --cookies --cookies-export --cookies-from-browser --download-archive --abort --terminate --range --chapter-range --filter --chapter-filter --postprocessor --postprocessor-option --write-metadata --write-info-json --write-infojson --write-tags --zip --cbz --mtime --mtime-from-date --ugoira --ugoira-conv --ugoira-conv-lossless --ugoira-conv-copy --exec --exec-after" -- "${cur}") )
+ COMPREPLY=( $(compgen -W "--help --version --filename --destination --directory --extractors --user-agent --clear-cache --update --update-to --update-check --input-file --input-file-comment --input-file-delete --no-input --quiet --warning --verbose --get-urls --resolve-urls --dump-json --simulate --extractor-info --list-keywords --error-file --list-modules --list-extractors --write-log --write-unsupported --write-pages --no-colors --retries --http-timeout --proxy --source-address --no-check-certificate --limit-rate --chunk-size --sleep --sleep-request --sleep-extractor --no-part --no-skip --no-mtime --no-download --option --config --config-yaml --config-toml --config-create --config-status --config-open --config-ignore --ignore-config --username --password --netrc --cookies --cookies-export --cookies-from-browser --abort --terminate --filesize-min --filesize-max --download-archive --range --chapter-range --filter --chapter-filter --postprocessor --no-postprocessors --postprocessor-option --write-metadata --write-info-json --write-infojson --write-tags --zip --cbz --mtime --mtime-from-date --ugoira --ugoira-conv --ugoira-conv-lossless --ugoira-conv-copy --exec --exec-after" -- "${cur}") )
fi
}
diff --git a/data/completion/gallery-dl.fish b/data/completion/gallery-dl.fish
index 4913c6f..a67cd63 100644
--- a/data/completion/gallery-dl.fish
+++ b/data/completion/gallery-dl.fish
@@ -1,18 +1,19 @@
complete -c gallery-dl -x
complete -c gallery-dl -s 'h' -l 'help' -d 'Print this help message and exit'
complete -c gallery-dl -l 'version' -d 'Print program version and exit'
-complete -c gallery-dl -s 'U' -l 'update-check' -d 'Check if a newer version is available'
complete -c gallery-dl -x -s 'f' -l 'filename' -d 'Filename format string for downloaded files ("/O" for "original" filenames)'
complete -c gallery-dl -x -a '(__fish_complete_directories)' -s 'd' -l 'destination' -d 'Target location for file downloads'
complete -c gallery-dl -x -a '(__fish_complete_directories)' -s 'D' -l 'directory' -d 'Exact location for file downloads'
complete -c gallery-dl -x -a '(__fish_complete_directories)' -s 'X' -l 'extractors' -d 'Load external extractors from PATH'
-complete -c gallery-dl -x -l 'proxy' -d 'Use the specified proxy'
-complete -c gallery-dl -x -l 'source-address' -d 'Client-side IP address to bind to'
complete -c gallery-dl -x -l 'user-agent' -d 'User-Agent request header'
complete -c gallery-dl -x -l 'clear-cache' -d 'Delete cached login sessions, cookies, etc. for MODULE (ALL to delete everything)'
+complete -c gallery-dl -s 'U' -l 'update' -d 'Update to the latest version'
+complete -c gallery-dl -x -l 'update-to' -d 'Switch to a different release channel (stable or dev) or upgrade/downgrade to a specific version'
+complete -c gallery-dl -l 'update-check' -d 'Check if a newer version is available'
complete -c gallery-dl -r -F -s 'i' -l 'input-file' -d 'Download URLs found in FILE ("-" for stdin). More than one --input-file can be specified'
complete -c gallery-dl -r -F -s 'I' -l 'input-file-comment' -d 'Download URLs found in FILE. Comment them out after they were downloaded successfully.'
complete -c gallery-dl -r -F -s 'x' -l 'input-file-delete' -d 'Download URLs found in FILE. Delete them after they were downloaded successfully.'
+complete -c gallery-dl -l 'no-input' -d 'Do not prompt for passwords/tokens'
complete -c gallery-dl -s 'q' -l 'quiet' -d 'Activate quiet mode'
complete -c gallery-dl -s 'w' -l 'warning' -d 'Print only warnings and errors'
complete -c gallery-dl -s 'v' -l 'verbose' -d 'Print various debugging information'
@@ -29,26 +30,27 @@ complete -c gallery-dl -r -F -l 'write-log' -d 'Write logging output to FILE'
complete -c gallery-dl -r -F -l 'write-unsupported' -d 'Write URLs, which get emitted by other extractors but cannot be handled, to FILE'
complete -c gallery-dl -l 'write-pages' -d 'Write downloaded intermediary pages to files in the current directory to debug problems'
complete -c gallery-dl -l 'no-colors' -d 'Do not emit ANSI color codes in output'
-complete -c gallery-dl -x -s 'r' -l 'limit-rate' -d 'Maximum download rate (e.g. 500k or 2.5M)'
complete -c gallery-dl -x -s 'R' -l 'retries' -d 'Maximum number of retries for failed HTTP requests or -1 for infinite retries (default: 4)'
complete -c gallery-dl -x -l 'http-timeout' -d 'Timeout for HTTP connections (default: 30.0)'
+complete -c gallery-dl -x -l 'proxy' -d 'Use the specified proxy'
+complete -c gallery-dl -x -l 'source-address' -d 'Client-side IP address to bind to'
+complete -c gallery-dl -l 'no-check-certificate' -d 'Disable HTTPS certificate validation'
+complete -c gallery-dl -x -s 'r' -l 'limit-rate' -d 'Maximum download rate (e.g. 500k or 2.5M)'
+complete -c gallery-dl -x -l 'chunk-size' -d 'Size of in-memory data chunks (default: 32k)'
complete -c gallery-dl -x -l 'sleep' -d 'Number of seconds to wait before each download. This can be either a constant value or a range (e.g. 2.7 or 2.0-3.5)'
complete -c gallery-dl -x -l 'sleep-request' -d 'Number of seconds to wait between HTTP requests during data extraction'
complete -c gallery-dl -x -l 'sleep-extractor' -d 'Number of seconds to wait before starting data extraction for an input URL'
-complete -c gallery-dl -x -l 'filesize-min' -d 'Do not download files smaller than SIZE (e.g. 500k or 2.5M)'
-complete -c gallery-dl -x -l 'filesize-max' -d 'Do not download files larger than SIZE (e.g. 500k or 2.5M)'
-complete -c gallery-dl -x -l 'chunk-size' -d 'Size of in-memory data chunks (default: 32k)'
complete -c gallery-dl -l 'no-part' -d 'Do not use .part files'
complete -c gallery-dl -l 'no-skip' -d 'Do not skip downloads; overwrite existing files'
complete -c gallery-dl -l 'no-mtime' -d 'Do not set file modification times according to Last-Modified HTTP response headers'
complete -c gallery-dl -l 'no-download' -d 'Do not download any files'
-complete -c gallery-dl -l 'no-postprocessors' -d 'Do not run any post processors'
-complete -c gallery-dl -l 'no-check-certificate' -d 'Disable HTTPS certificate validation'
complete -c gallery-dl -x -s 'o' -l 'option' -d 'Additional options. Example: -o browser=firefox'
complete -c gallery-dl -r -F -s 'c' -l 'config' -d 'Additional configuration files'
complete -c gallery-dl -r -F -l 'config-yaml' -d 'Additional configuration files in YAML format'
complete -c gallery-dl -r -F -l 'config-toml' -d 'Additional configuration files in TOML format'
complete -c gallery-dl -l 'config-create' -d 'Create a basic configuration file'
+complete -c gallery-dl -l 'config-status' -d 'Show configuration file status'
+complete -c gallery-dl -l 'config-open' -d 'Open configuration file in external application'
complete -c gallery-dl -l 'config-ignore' -d 'Do not read default configuration files'
complete -c gallery-dl -l 'ignore-config' -d '==SUPPRESS=='
complete -c gallery-dl -x -s 'u' -l 'username' -d 'Username to login with'
@@ -57,14 +59,17 @@ complete -c gallery-dl -l 'netrc' -d 'Enable .netrc authentication data'
complete -c gallery-dl -r -F -s 'C' -l 'cookies' -d 'File to load additional cookies from'
complete -c gallery-dl -r -F -l 'cookies-export' -d 'Export session cookies to FILE'
complete -c gallery-dl -x -l 'cookies-from-browser' -d 'Name of the browser to load cookies from, with optional domain prefixed with "/", keyring name prefixed with "+", profile prefixed with ":", and container prefixed with "::" ("none" for no container)'
-complete -c gallery-dl -r -F -l 'download-archive' -d 'Record all downloaded or skipped files in FILE and skip downloading any file already in it'
complete -c gallery-dl -x -s 'A' -l 'abort' -d 'Stop current extractor run after N consecutive file downloads were skipped'
complete -c gallery-dl -x -s 'T' -l 'terminate' -d 'Stop current and parent extractor runs after N consecutive file downloads were skipped'
+complete -c gallery-dl -x -l 'filesize-min' -d 'Do not download files smaller than SIZE (e.g. 500k or 2.5M)'
+complete -c gallery-dl -x -l 'filesize-max' -d 'Do not download files larger than SIZE (e.g. 500k or 2.5M)'
+complete -c gallery-dl -r -F -l 'download-archive' -d 'Record all downloaded or skipped files in FILE and skip downloading any file already in it'
complete -c gallery-dl -x -l 'range' -d 'Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. "5", "8-20", or "1:24:3")'
complete -c gallery-dl -x -l 'chapter-range' -d 'Like "--range", but applies to manga chapters and other delegated URLs'
complete -c gallery-dl -x -l 'filter' -d 'Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by "-K". Example: --filter "image_width >= 1000 and rating in ("s", "q")"'
complete -c gallery-dl -x -l 'chapter-filter' -d 'Like "--filter", but applies to manga chapters and other delegated URLs'
complete -c gallery-dl -x -s 'P' -l 'postprocessor' -d 'Activate the specified post processor'
+complete -c gallery-dl -l 'no-postprocessors' -d 'Do not run any post processors'
complete -c gallery-dl -x -s 'O' -l 'postprocessor-option' -d 'Additional post processor options'
complete -c gallery-dl -l 'write-metadata' -d 'Write metadata to separate JSON files'
complete -c gallery-dl -l 'write-info-json' -d 'Write gallery metadata to an info.json file'
diff --git a/data/man/gallery-dl.1 b/data/man/gallery-dl.1
index e964a67..37529bf 100644
--- a/data/man/gallery-dl.1
+++ b/data/man/gallery-dl.1
@@ -1,4 +1,4 @@
-.TH "GALLERY-DL" "1" "2024-06-01" "1.27.0" "gallery-dl Manual"
+.TH "GALLERY-DL" "1" "2024-06-22" "1.27.1" "gallery-dl Manual"
.\" disable hyphenation
.nh
@@ -23,9 +23,6 @@ Print this help message and exit
.B "\-\-version"
Print program version and exit
.TP
-.B "\-U, \-\-update\-check"
-Check if a newer version is available
-.TP
.B "\-f, \-\-filename" \f[I]FORMAT\f[]
Filename format string for downloaded files ('/O' for "original" filenames)
.TP
@@ -38,18 +35,21 @@ Exact location for file downloads
.B "\-X, \-\-extractors" \f[I]PATH\f[]
Load external extractors from PATH
.TP
-.B "\-\-proxy" \f[I]URL\f[]
-Use the specified proxy
-.TP
-.B "\-\-source\-address" \f[I]IP\f[]
-Client-side IP address to bind to
-.TP
.B "\-\-user\-agent" \f[I]UA\f[]
User-Agent request header
.TP
.B "\-\-clear\-cache" \f[I]MODULE\f[]
Delete cached login sessions, cookies, etc. for MODULE (ALL to delete everything)
.TP
+.B "\-U, \-\-update"
+Update to the latest version
+.TP
+.B "\-\-update\-to" \f[I]CHANNEL[@TAG]\f[]
+Switch to a different release channel (stable or dev) or upgrade/downgrade to a specific version
+.TP
+.B "\-\-update\-check"
+Check if a newer version is available
+.TP
.B "\-i, \-\-input\-file" \f[I]FILE\f[]
Download URLs found in FILE ('-' for stdin). More than one --input-file can be specified
.TP
@@ -59,6 +59,9 @@ Download URLs found in FILE. Comment them out after they were downloaded success
.B "\-x, \-\-input\-file\-delete" \f[I]FILE\f[]
Download URLs found in FILE. Delete them after they were downloaded successfully.
.TP
+.B "\-\-no\-input"
+Do not prompt for passwords/tokens
+.TP
.B "\-q, \-\-quiet"
Activate quiet mode
.TP
@@ -107,15 +110,27 @@ Write downloaded intermediary pages to files in the current directory to debug p
.B "\-\-no\-colors"
Do not emit ANSI color codes in output
.TP
-.B "\-r, \-\-limit\-rate" \f[I]RATE\f[]
-Maximum download rate (e.g. 500k or 2.5M)
-.TP
.B "\-R, \-\-retries" \f[I]N\f[]
Maximum number of retries for failed HTTP requests or -1 for infinite retries (default: 4)
.TP
.B "\-\-http\-timeout" \f[I]SECONDS\f[]
Timeout for HTTP connections (default: 30.0)
.TP
+.B "\-\-proxy" \f[I]URL\f[]
+Use the specified proxy
+.TP
+.B "\-\-source\-address" \f[I]IP\f[]
+Client-side IP address to bind to
+.TP
+.B "\-\-no\-check\-certificate"
+Disable HTTPS certificate validation
+.TP
+.B "\-r, \-\-limit\-rate" \f[I]RATE\f[]
+Maximum download rate (e.g. 500k or 2.5M)
+.TP
+.B "\-\-chunk\-size" \f[I]SIZE\f[]
+Size of in-memory data chunks (default: 32k)
+.TP
.B "\-\-sleep" \f[I]SECONDS\f[]
Number of seconds to wait before each download. This can be either a constant value or a range (e.g. 2.7 or 2.0-3.5)
.TP
@@ -125,15 +140,6 @@ Number of seconds to wait between HTTP requests during data extraction
.B "\-\-sleep\-extractor" \f[I]SECONDS\f[]
Number of seconds to wait before starting data extraction for an input URL
.TP
-.B "\-\-filesize\-min" \f[I]SIZE\f[]
-Do not download files smaller than SIZE (e.g. 500k or 2.5M)
-.TP
-.B "\-\-filesize\-max" \f[I]SIZE\f[]
-Do not download files larger than SIZE (e.g. 500k or 2.5M)
-.TP
-.B "\-\-chunk\-size" \f[I]SIZE\f[]
-Size of in-memory data chunks (default: 32k)
-.TP
.B "\-\-no\-part"
Do not use .part files
.TP
@@ -146,12 +152,6 @@ Do not set file modification times according to Last-Modified HTTP response head
.B "\-\-no\-download"
Do not download any files
.TP
-.B "\-\-no\-postprocessors"
-Do not run any post processors
-.TP
-.B "\-\-no\-check\-certificate"
-Disable HTTPS certificate validation
-.TP
.B "\-o, \-\-option" \f[I]KEY=VALUE\f[]
Additional options. Example: -o browser=firefox
.TP
@@ -167,6 +167,12 @@ Additional configuration files in TOML format
.B "\-\-config\-create"
Create a basic configuration file
.TP
+.B "\-\-config\-status"
+Show configuration file status
+.TP
+.B "\-\-config\-open"
+Open configuration file in external application
+.TP
.B "\-\-config\-ignore"
Do not read default configuration files
.TP
@@ -188,15 +194,21 @@ Export session cookies to FILE
.B "\-\-cookies\-from\-browser" \f[I]BROWSER[/DOMAIN][+KEYRING][:PROFILE][::CONTAINER]\f[]
Name of the browser to load cookies from, with optional domain prefixed with '/', keyring name prefixed with '+', profile prefixed with ':', and container prefixed with '::' ('none' for no container)
.TP
-.B "\-\-download\-archive" \f[I]FILE\f[]
-Record all downloaded or skipped files in FILE and skip downloading any file already in it
-.TP
.B "\-A, \-\-abort" \f[I]N\f[]
Stop current extractor run after N consecutive file downloads were skipped
.TP
.B "\-T, \-\-terminate" \f[I]N\f[]
Stop current and parent extractor runs after N consecutive file downloads were skipped
.TP
+.B "\-\-filesize\-min" \f[I]SIZE\f[]
+Do not download files smaller than SIZE (e.g. 500k or 2.5M)
+.TP
+.B "\-\-filesize\-max" \f[I]SIZE\f[]
+Do not download files larger than SIZE (e.g. 500k or 2.5M)
+.TP
+.B "\-\-download\-archive" \f[I]FILE\f[]
+Record all downloaded or skipped files in FILE and skip downloading any file already in it
+.TP
.B "\-\-range" \f[I]RANGE\f[]
Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. '5', '8-20', or '1:24:3')
.TP
@@ -212,6 +224,9 @@ Like '--filter', but applies to manga chapters and other delegated URLs
.B "\-P, \-\-postprocessor" \f[I]NAME\f[]
Activate the specified post processor
.TP
+.B "\-\-no\-postprocessors"
+Do not run any post processors
+.TP
.B "\-O, \-\-postprocessor\-option" \f[I]KEY=VALUE\f[]
Additional post processor options
.TP
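The man-page hunks above reorder existing flags and document the new update, input, and config options. A short sketch of how the new flags compose on the command line, driven from Python via subprocess (assumes a `gallery-dl` executable on PATH; the URL and the `dev@2024.05.25` tag are placeholders taken from the changelog examples):

    import subprocess

    # -U now updates; plain version checks moved to --update-check
    subprocess.run(["gallery-dl", "--update-check"])

    # switch release channels or pin a specific build
    subprocess.run(["gallery-dl", "--update-to", "dev@2024.05.25"])

    # inspect and edit the configuration without hunting for the file
    subprocess.run(["gallery-dl", "--config-status"])
    subprocess.run(["gallery-dl", "--config-open"])

    # run non-interactively: never prompt for passwords/tokens
    subprocess.run(["gallery-dl", "--no-input", "https://example.org/gallery"])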
diff --git a/data/man/gallery-dl.conf.5 b/data/man/gallery-dl.conf.5
index 5db584b..e3ed58a 100644
--- a/data/man/gallery-dl.conf.5
+++ b/data/man/gallery-dl.conf.5
@@ -1,4 +1,4 @@
-.TH "GALLERY-DL.CONF" "5" "2024-06-01" "1.27.0" "gallery-dl Manual"
+.TH "GALLERY-DL.CONF" "5" "2024-06-22" "1.27.1" "gallery-dl Manual"
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
@@ -568,6 +568,18 @@ to be prompted for a password when performing a login
(see \f[I]getpass()\f[]).
+.SS extractor.*.input
+.IP "Type:" 6
+\f[I]bool\f[]
+
+.IP "Default:" 9
+\f[I]true\f[] if stdin is attached to a terminal,
+\f[I]false\f[] otherwise
+
+.IP "Description:" 4
+Allow prompting the user for interactive input.
+
+
.SS extractor.*.netrc
.IP "Type:" 6
\f[I]bool\f[]
@@ -1056,7 +1068,7 @@ Prefix for archive IDs.
.IP "Description:" 4
A list of SQLite \f[I]PRAGMA\f[] statements to run during archive initialization.
-See \f[I]<https://www.sqlite.org/pragma.html>\f[]
+See \f[I]<https://www.sqlite.org/pragma.html#toc>\f[]
for available \f[I]PRAGMA\f[] statements and further details.
@@ -1752,6 +1764,20 @@ to access 18+ content without \f[I]API Key\f[].
See \f[I]Filters\f[] for details.
+.SS extractor.derpibooru.svg
+.IP "Type:" 6
+\f[I]bool\f[]
+
+.IP "Default:" 9
+\f[I]true\f[]
+
+.IP "Description:" 4
+Download SVG versions of images when available.
+
+Try to download the \f[I]view_url\f[] version of these posts
+when this option is disabled.
+
+
.SS extractor.deviantart.auto-watch
.IP "Type:" 6
\f[I]bool\f[]
@@ -2300,8 +2326,8 @@ Control behavior on embedded content from external sites.
* \f[I]true\f[]: Extract embed URLs and download them if supported
(videos are not downloaded).
.br
-* \f[I]"ytdl"\f[]: Like \f[I]true\f[], but let \f[I]youtube-dl\f[] handle video
-extraction and download for YouTube, Vimeo and SoundCloud embeds.
+* \f[I]"ytdl"\f[]: Like \f[I]true\f[], but let \f[I]ytdl\f[] handle video
+extraction and download for YouTube, Vimeo, and SoundCloud embeds.
.br
* \f[I]false\f[]: Ignore embeds.
@@ -3205,13 +3231,19 @@ Download original Adobe Flash animations instead of pre-rendered videos.
.SS extractor.newgrounds.format
.IP "Type:" 6
-\f[I]string\f[]
+.br
+* \f[I]string\f[]
+.br
+* \f[I]list\f[] of \f[I]string\f[]
.IP "Default:" 9
\f[I]"original"\f[]
.IP "Example:" 4
-"720p"
+.br
+* "720p"
+.br
+* ["mp4", "mov", "1080p", "720p"]
.IP "Description:" 4
Selects the preferred format for video downloads.
@@ -3219,6 +3251,10 @@ Selects the preferred format for video downloads.
If the selected format is not available,
the next smaller one gets chosen.
+If this is a \f[I]list\f[], try each given
+filename extension in original resolution or recoded format
+until an available format is found.
+
.SS extractor.newgrounds.include
.IP "Type:" 6
@@ -3304,7 +3340,7 @@ Control video download behavior.
.br
* \f[I]true\f[]: Download videos
.br
-* \f[I]"ytdl"\f[]: Download videos using \f[I]youtube-dl\f[]
+* \f[I]"ytdl"\f[]: Download videos using \f[I]ytdl\f[]
.br
* \f[I]false\f[]: Skip video Tweets
@@ -3392,17 +3428,6 @@ Available types are
\f[I]postfile\f[], \f[I]images\f[], \f[I]image_large\f[], \f[I]attachments\f[], and \f[I]content\f[].
-.SS extractor.photobucket.subalbums
-.IP "Type:" 6
-\f[I]bool\f[]
-
-.IP "Default:" 9
-\f[I]true\f[]
-
-.IP "Description:" 4
-Download subalbums.
-
-
.SS extractor.pillowfort.external
.IP "Type:" 6
\f[I]bool\f[]
@@ -3826,13 +3851,13 @@ at 600 requests every 10 minutes/600 seconds.
Control video download behavior.
.br
-* \f[I]true\f[]: Download videos and use \f[I]youtube-dl\f[] to handle
+* \f[I]true\f[]: Download videos and use \f[I]ytdl\f[] to handle
HLS and DASH manifests
.br
-* \f[I]"ytdl"\f[]: Download videos and let \f[I]youtube-dl\f[] handle all of
+* \f[I]"ytdl"\f[]: Download videos and let \f[I]ytdl\f[] handle all of
video extraction and download
.br
-* \f[I]"dash"\f[]: Extract DASH manifest URLs and use \f[I]youtube-dl\f[]
+* \f[I]"dash"\f[]: Extract DASH manifest URLs and use \f[I]ytdl\f[]
to download and merge them. (*)
.br
* \f[I]false\f[]: Ignore videos
@@ -4398,6 +4423,20 @@ to access 18+ content without \f[I]API Key\f[].
See \f[I]Filters\f[] for details.
+.SS extractor.twibooru.svg
+.IP "Type:" 6
+\f[I]bool\f[]
+
+.IP "Default:" 9
+\f[I]true\f[]
+
+.IP "Description:" 4
+Download SVG versions of images when available.
+
+Try to download the \f[I]view_url\f[] version of these posts
+when this option is disabled.
+
+
.SS extractor.twitter.ads
.IP "Type:" 6
\f[I]bool\f[]
@@ -4427,7 +4466,8 @@ Controls how to handle \f[I]Twitter Cards\f[].
.br
* \f[I]true\f[]: Download image content from supported cards
.br
-* \f[I]"ytdl"\f[]: Additionally download video content from unsupported cards using \f[I]youtube-dl\f[]
+* \f[I]"ytdl"\f[]: Additionally download video content from unsupported cards
+using \f[I]ytdl\f[]
.SS extractor.twitter.cards-blacklist
@@ -4506,6 +4546,18 @@ for each Tweet in said timeline.
Note: This requires at least 1 additional API call per initial Tweet.
+.SS extractor.twitter.unavailable
+.IP "Type:" 6
+\f[I]bool\f[]
+
+.IP "Default:" 9
+\f[I]false\f[]
+
+.IP "Description:" 4
+Try to download media marked as \f[I]Unavailable\f[],
+e.g. \f[I]Geoblocked\f[] videos.
+
+
.SS extractor.twitter.include
.IP "Type:" 6
.br
@@ -4633,6 +4685,8 @@ Selects how to handle exceeding the API rate limit.
* \f[I]"abort"\f[]: Raise an error and stop extraction
.br
* \f[I]"wait"\f[]: Wait until rate limit reset
+.br
+* \f[I]"wait:N"\f[]: Wait for \f[I]N\f[] seconds
.SS extractor.twitter.relogin
@@ -4644,10 +4698,8 @@ Selects how to handle exceeding the API rate limit.
.IP "Description:" 4
When receiving a "Could not authenticate you" error while logged in with
-.br
\f[I]username & password\f[],
refresh the current login session and
-.br
try to continue from where it left off.
@@ -4760,6 +4812,18 @@ Extract \f[I]TwitPic\f[] embeds.
Ignore previously seen Tweets.
+.SS extractor.twitter.username-alt
+.IP "Type:" 6
+\f[I]string\f[]
+
+.IP "Description:" 4
+Alternate Identifier (username, email, phone number)
+when \f[I]logging in\f[].
+
+When not specified and asked for by Twitter,
+this identifier will need to be entered in an interactive prompt.
+
+
.SS extractor.twitter.users
.IP "Type:" 6
\f[I]string\f[]
@@ -4809,7 +4873,7 @@ Control video download behavior.
.br
* \f[I]true\f[]: Download videos
.br
-* \f[I]"ytdl"\f[]: Download videos using \f[I]youtube-dl\f[]
+* \f[I]"ytdl"\f[]: Download videos using \f[I]ytdl\f[]
.br
* \f[I]false\f[]: Skip video Tweets
@@ -5025,6 +5089,39 @@ will be taken from the original posts, not the retweeted posts.
Download video files.
+.SS extractor.ytdl.cmdline-args
+.IP "Type:" 6
+.br
+* \f[I]string\f[]
+.br
+* \f[I]list\f[] of \f[I]strings\f[]
+
+.IP "Example:" 4
+.br
+* "--quiet --write-sub --merge-output-format mkv"
+.br
+* ["--quiet", "--write-sub", "--merge-output-format", "mkv"]
+
+.IP "Description:" 4
+Additional \f[I]ytdl\f[] options specified as command-line arguments.
+
+See
+\f[I]yt-dlp options\f[]
+/
+\f[I]youtube-dl options\f[]
+
+
+.SS extractor.ytdl.config-file
+.IP "Type:" 6
+\f[I]Path\f[]
+
+.IP "Example:" 4
+"~/.config/yt-dlp/config"
+
+.IP "Description:" 4
+Location of a \f[I]ytdl\f[] configuration file to load options from.
+
+
.SS extractor.ytdl.enabled
.IP "Type:" 6
\f[I]bool\f[]
@@ -5033,7 +5130,7 @@ Download video files.
\f[I]false\f[]
.IP "Description:" 4
-Match **all** URLs, even ones without a \f[I]ytdl:\f[] prefix.
+Process URLs otherwise unsupported by gallery-dl with \f[I]ytdl\f[].
.SS extractor.ytdl.format
@@ -5041,25 +5138,35 @@ Match **all** URLs, even ones without a \f[I]ytdl:\f[] prefix.
\f[I]string\f[]
.IP "Default:" 9
-youtube-dl's default, currently \f[I]"bestvideo+bestaudio/best"\f[]
+Default of the \f[I]ytdl\f[] \f[I]module\f[] used.
+.br
+(\f[I]"bestvideo*+bestaudio/best"\f[] for \f[I]yt_dlp\f[],
+.br
+\f[I]"bestvideo+bestaudio/best"\f[] for \f[I]youtube_dl\f[])
.IP "Description:" 4
-Video \f[I]format selection
-<https://github.com/ytdl-org/youtube-dl#format-selection>\f[]
-directly passed to youtube-dl.
+\f[I]ytdl\f[] format selection string.
+
+See
+\f[I]yt-dlp format selection\f[]
+/
+\f[I]youtube-dl format selection\f[]
.SS extractor.ytdl.generic
.IP "Type:" 6
-\f[I]bool\f[]
+.br
+* \f[I]bool\f[]
+.br
+* \f[I]string\f[]
.IP "Default:" 9
\f[I]true\f[]
.IP "Description:" 4
-Controls the use of youtube-dl's generic extractor.
+Enables the use of \f[I]ytdl's\f[] \f[I]generic\f[] extractor.
-Set this option to \f[I]"force"\f[] for the same effect as youtube-dl's
+Set this option to \f[I]"force"\f[] for the same effect as
\f[I]--force-generic-extractor\f[].
@@ -5071,8 +5178,8 @@ Set this option to \f[I]"force"\f[] for the same effect as youtube-dl's
\f[I]true\f[]
.IP "Description:" 4
-Route youtube-dl's output through gallery-dl's logging system.
-Otherwise youtube-dl will write its output directly to stdout/stderr.
+Route \f[I]ytdl's\f[] output through gallery-dl's logging system.
+Otherwise it will be written directly to stdout/stderr.
Note: Set \f[I]quiet\f[] and \f[I]no_warnings\f[] in
\f[I]extractor.ytdl.raw-options\f[] to \f[I]true\f[] to suppress all output.
@@ -5086,7 +5193,7 @@ Note: Set \f[I]quiet\f[] and \f[I]no_warnings\f[] in
\f[I]null\f[]
.IP "Description:" 4
-Name of the youtube-dl Python module to import.
+Name of the \f[I]ytdl\f[] Python module to import.
Setting this to \f[I]null\f[] will try to import \f[I]"yt_dlp"\f[]
followed by \f[I]"youtube_dl"\f[] as fallback.
@@ -5109,36 +5216,10 @@ followed by \f[I]"youtube_dl"\f[] as fallback.
.IP "Description:" 4
Additional options passed directly to the \f[I]YoutubeDL\f[] constructor.
-All available options can be found in \f[I]youtube-dl's docstrings
-<https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L138-L318>\f[].
-
-
-.SS extractor.ytdl.cmdline-args
-.IP "Type:" 6
-.br
-* \f[I]string\f[]
-.br
-* \f[I]list\f[] of \f[I]strings\f[]
-
-.IP "Example:" 4
-.br
-* "--quiet --write-sub --merge-output-format mkv"
-.br
-* ["--quiet", "--write-sub", "--merge-output-format", "mkv"]
-
-.IP "Description:" 4
-Additional options specified as youtube-dl command-line arguments.
-
-
-.SS extractor.ytdl.config-file
-.IP "Type:" 6
-\f[I]Path\f[]
-
-.IP "Example:" 4
-"~/.config/youtube-dl/config"
-
-.IP "Description:" 4
-Location of a youtube-dl configuration file to load options from.
+Available options can be found in
+\f[I]yt-dlp's docstrings\f[]
+/
+\f[I]youtube-dl's docstrings\f[]
.SS extractor.zerochan.metadata
@@ -5504,17 +5585,57 @@ Fail a download when a file does not pass
instead of downloading a potentially broken file.
+.SS downloader.ytdl.cmdline-args
+.IP "Type:" 6
+.br
+* \f[I]string\f[]
+.br
+* \f[I]list\f[] of \f[I]strings\f[]
+
+.IP "Example:" 4
+.br
+* "--quiet --write-sub --merge-output-format mkv"
+.br
+* ["--quiet", "--write-sub", "--merge-output-format", "mkv"]
+
+.IP "Description:" 4
+Additional \f[I]ytdl\f[] options specified as command-line arguments.
+
+See
+\f[I]yt-dlp options\f[]
+/
+\f[I]youtube-dl options\f[]
+
+
+.SS downloader.ytdl.config-file
+.IP "Type:" 6
+\f[I]Path\f[]
+
+.IP "Example:" 4
+"~/.config/yt-dlp/config"
+
+.IP "Description:" 4
+Location of a \f[I]ytdl\f[] configuration file to load options from.
+
+
.SS downloader.ytdl.format
.IP "Type:" 6
\f[I]string\f[]
.IP "Default:" 9
-youtube-dl's default, currently \f[I]"bestvideo+bestaudio/best"\f[]
+Default of the \f[I]ytdl\f[] \f[I]module\f[] used.
+.br
+(\f[I]"bestvideo*+bestaudio/best"\f[] for \f[I]yt_dlp\f[],
+.br
+\f[I]"bestvideo+bestaudio/best"\f[] for \f[I]youtube_dl\f[])
.IP "Description:" 4
-Video \f[I]format selection
-<https://github.com/ytdl-org/youtube-dl#format-selection>\f[]
-directly passed to youtube-dl.
+\f[I]ytdl\f[] format selection string.
+
+See
+\f[I]yt-dlp format selection\f[]
+/
+\f[I]youtube-dl format selection\f[]
.SS downloader.ytdl.forward-cookies
@@ -5525,7 +5646,7 @@ directly passed to youtube-dl.
\f[I]false\f[]
.IP "Description:" 4
-Forward cookies to youtube-dl.
+Forward gallery-dl's cookies to \f[I]ytdl\f[].
.SS downloader.ytdl.logging
@@ -5536,8 +5657,8 @@ Forward cookies to youtube-dl.
\f[I]true\f[]
.IP "Description:" 4
-Route youtube-dl's output through gallery-dl's logging system.
-Otherwise youtube-dl will write its output directly to stdout/stderr.
+Route \f[I]ytdl's\f[] output through gallery-dl's logging system.
+Otherwise it will be written directly to stdout/stderr.
Note: Set \f[I]quiet\f[] and \f[I]no_warnings\f[] in
\f[I]downloader.ytdl.raw-options\f[] to \f[I]true\f[] to suppress all output.
@@ -5551,10 +5672,10 @@ Note: Set \f[I]quiet\f[] and \f[I]no_warnings\f[] in
\f[I]null\f[]
.IP "Description:" 4
-Name of the youtube-dl Python module to import.
+Name of the \f[I]ytdl\f[] Python module to import.
-Setting this to \f[I]null\f[] will first try to import \f[I]"yt_dlp"\f[]
-and use \f[I]"youtube_dl"\f[] as fallback.
+Setting this to \f[I]null\f[] will try to import \f[I]"yt_dlp"\f[]
+followed by \f[I]"youtube_dl"\f[] as fallback.
.SS downloader.ytdl.outtmpl
@@ -5565,18 +5686,25 @@ and use \f[I]"youtube_dl"\f[] as fallback.
\f[I]null\f[]
.IP "Description:" 4
-The \f[I]Output Template\f[]
-used to generate filenames for files downloaded with youtube-dl.
+The Output Template
+used to generate filenames for files downloaded with \f[I]ytdl\f[].
+
+See
+\f[I]yt-dlp output template\f[]
+/
+\f[I]youtube-dl output template\f[].
Special values:
.br
* \f[I]null\f[]: generate filenames with \f[I]extractor.*.filename\f[]
.br
-* \f[I]"default"\f[]: use youtube-dl's default, currently \f[I]"%(title)s-%(id)s.%(ext)s"\f[]
+* \f[I]"default"\f[]: use \f[I]ytdl's\f[] default, currently
+\f[I]"%(title)s [%(id)s].%(ext)s"\f[] for \f[I]yt-dlp\f[] /
+\f[I]"%(title)s-%(id)s.%(ext)s"\f[] for \f[I]youtube-dl\f[]
Note: An output template other than \f[I]null\f[] might
-cause unexpected results in combination with other options
+cause unexpected results in combination with certain options
(e.g. \f[I]"skip": "enumerate"\f[])
@@ -5597,36 +5725,10 @@ cause unexpected results in combination with other options
.IP "Description:" 4
Additional options passed directly to the \f[I]YoutubeDL\f[] constructor.
-All available options can be found in \f[I]youtube-dl's docstrings
-<https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L138-L318>\f[].
-
-
-.SS downloader.ytdl.cmdline-args
-.IP "Type:" 6
-.br
-* \f[I]string\f[]
-.br
-* \f[I]list\f[] of \f[I]strings\f[]
-
-.IP "Example:" 4
-.br
-* "--quiet --write-sub --merge-output-format mkv"
-.br
-* ["--quiet", "--write-sub", "--merge-output-format", "mkv"]
-
-.IP "Description:" 4
-Additional options specified as youtube-dl command-line arguments.
-
-
-.SS downloader.ytdl.config-file
-.IP "Type:" 6
-\f[I]Path\f[]
-
-.IP "Example:" 4
-"~/.config/youtube-dl/config"
-
-.IP "Description:" 4
-Location of a youtube-dl configuration file to load options from.
+Available options can be found in
+\f[I]yt-dlp's docstrings\f[]
+/
+\f[I]youtube-dl's docstrings\f[]
.SH OUTPUT OPTIONS
@@ -6569,7 +6671,7 @@ Filename extension for the resulting video files.
["-c:v", "libvpx-vp9", "-an", "-b:v", "2M"]
.IP "Description:" 4
-Additional FFmpeg command-line arguments.
+Additional \f[I]ffmpeg\f[] command-line arguments.
.SS ugoira.ffmpeg-demuxer
@@ -6580,7 +6682,9 @@ Additional FFmpeg command-line arguments.
\f[I]auto\f[]
.IP "Description:" 4
-FFmpeg demuxer to read and process input files with. Possible values are
+\f[I]ffmpeg\f[] demuxer to read and process input files with.
+
+Possible values are
.br
* "\f[I]concat\f[]" (inaccurate frame timecodes for non-uniform frame delays)
@@ -6626,15 +6730,15 @@ Location of the \f[I]mkvmerge\f[] executable for use with the
\f[I]"error"\f[]
.IP "Description:" 4
-Controls FFmpeg output.
+Controls \f[I]ffmpeg\f[] output.
.br
-* \f[I]true\f[]: Enable FFmpeg output
+* \f[I]true\f[]: Enable \f[I]ffmpeg\f[] output
.br
-* \f[I]false\f[]: Disable all FFmpeg output
+* \f[I]false\f[]: Disable all \f[I]ffmpeg\f[] output
.br
* any \f[I]string\f[]: Pass \f[I]-hide_banner\f[] and \f[I]-loglevel\f[]
-with this value as argument to FFmpeg
+with this value as argument to \f[I]ffmpeg\f[]
.SS ugoira.ffmpeg-twopass
@@ -6656,7 +6760,7 @@ Enable Two-Pass encoding.
\f[I]"auto"\f[]
.IP "Description:" 4
-Controls the frame rate argument (\f[I]-r\f[]) for FFmpeg
+Controls the frame rate argument (\f[I]-r\f[]) for \f[I]ffmpeg\f[]
.br
* \f[I]"auto"\f[]: Automatically assign a fitting frame rate
@@ -6697,7 +6801,7 @@ for more information.
This option, when \f[I]libx264/5\f[] is used, automatically
adds \f[I]["-vf", "crop=iw-mod(iw\\\\,2):ih-mod(ih\\\\,2)"]\f[]
-to the list of FFmpeg command-line arguments
+to the list of \f[I]ffmpeg\f[] command-line arguments
to reduce an odd width/height by 1 pixel and make them even.
@@ -6838,7 +6942,8 @@ Path to or name of an
Python module,
whose namespace,
.br
-in addition to the \f[I]GLOBALS\f[] dict in \f[I]util.py\f[],
+in addition to the \f[I]GLOBALS\f[] dict in
+\f[I]util.py\f[],
gets used as \f[I]globals parameter\f[] for compiled Python expressions.
@@ -7266,9 +7371,10 @@ Set file modification time according to its metadata
\f[I]python\f[]
Call Python functions
\f[I]ugoira\f[]
-Convert Pixiv Ugoira to WebM using \f[I]FFmpeg\f[]
+Convert Pixiv Ugoira to WebM using \f[I]ffmpeg\f[]
\f[I]zip\f[]
Store files in a ZIP archive
+\f[I]ytdl\f[]
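The rewritten ytdl sections above apply equally to `extractor.ytdl.*` and `downloader.ytdl.*`. A minimal sketch of a config fragment combining them (a sketch only: the values are illustrative, and choosing `"yt_dlp"` as `module` assumes yt-dlp is installed):

    import json

    fragment = {
        "downloader": {
            "ytdl": {
                # ytdl Python module to import; null tries yt_dlp,
                # then youtube_dl
                "module": "yt_dlp",
                # format selection string; defaults to the module's own
                # default ("bestvideo*+bestaudio/best" for yt_dlp)
                "format": "bestvideo*+bestaudio/best",
                # extra options as command-line arguments ...
                "cmdline-args": ["--quiet", "--write-sub",
                                 "--merge-output-format", "mkv"],
                # ... or loaded from a ytdl configuration file
                "config-file": "~/.config/yt-dlp/config",
            }
        }
    }

    print(json.dumps(fragment, indent=4))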
diff --git a/gallery_dl.egg-info/PKG-INFO b/gallery_dl.egg-info/PKG-INFO
index 4b15a5f..a06aa55 100644
--- a/gallery_dl.egg-info/PKG-INFO
+++ b/gallery_dl.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: gallery_dl
-Version: 1.27.0
+Version: 1.27.1
Summary: Command-line program to download image galleries and collections from several image hosting sites
Home-page: https://github.com/mikf/gallery-dl
Download-URL: https://github.com/mikf/gallery-dl/releases/latest
@@ -112,9 +112,9 @@ Standalone Executable
Prebuilt executable files with a Python interpreter and
required Python packages included are available for
-- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.exe>`__
+- `Windows <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.exe>`__
(Requires `Microsoft Visual C++ Redistributable Package (x86) <https://aka.ms/vs/17/release/vc_redist.x86.exe>`__)
-- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.0/gallery-dl.bin>`__
+- `Linux <https://github.com/mikf/gallery-dl/releases/download/v1.27.1/gallery-dl.bin>`__
Nightly Builds
diff --git a/gallery_dl.egg-info/SOURCES.txt b/gallery_dl.egg-info/SOURCES.txt
index a1745df..a892544 100644
--- a/gallery_dl.egg-info/SOURCES.txt
+++ b/gallery_dl.egg-info/SOURCES.txt
@@ -68,7 +68,6 @@ gallery_dl/extractor/booru.py
gallery_dl/extractor/bunkr.py
gallery_dl/extractor/catbox.py
gallery_dl/extractor/chevereto.py
-gallery_dl/extractor/cien.py
gallery_dl/extractor/comicvine.py
gallery_dl/extractor/common.py
gallery_dl/extractor/cyberdrop.py
@@ -103,6 +102,7 @@ gallery_dl/extractor/hentaifoundry.py
gallery_dl/extractor/hentaifox.py
gallery_dl/extractor/hentaihand.py
gallery_dl/extractor/hentaihere.py
+gallery_dl/extractor/hentainexus.py
gallery_dl/extractor/hiperdex.py
gallery_dl/extractor/hitomi.py
gallery_dl/extractor/hotleak.py
@@ -160,7 +160,6 @@ gallery_dl/extractor/oauth.py
gallery_dl/extractor/paheal.py
gallery_dl/extractor/patreon.py
gallery_dl/extractor/philomena.py
-gallery_dl/extractor/photobucket.py
gallery_dl/extractor/photovogue.py
gallery_dl/extractor/picarto.py
gallery_dl/extractor/piczel.py
diff --git a/gallery_dl/__init__.py b/gallery_dl/__init__.py
index bc44b35..1d4215e 100644
--- a/gallery_dl/__init__.py
+++ b/gallery_dl/__init__.py
@@ -220,8 +220,13 @@ def main():
cnt, "entry" if cnt == 1 else "entries", cache._path(),
)
- elif args.config_init:
- return config.initialize()
+ elif args.config:
+ if args.config == "init":
+ return config.initialize()
+ elif args.config == "status":
+ return config.status()
+ else:
+ return config.open_extern()
else:
if not args.urls and not args.input_files:
diff --git a/gallery_dl/config.py b/gallery_dl/config.py
index 4be6c53..0a187c1 100644
--- a/gallery_dl/config.py
+++ b/gallery_dl/config.py
@@ -90,13 +90,85 @@ def initialize():
return 0
+def open_extern():
+ for path in _default_configs:
+ path = util.expand_path(path)
+ if os.access(path, os.R_OK | os.W_OK):
+ break
+ else:
+ log.warning("Unable to find any writable configuration file")
+ return 1
+
+ if util.WINDOWS:
+ openers = ("explorer", "notepad")
+ else:
+ openers = ("xdg-open", "open")
+ editor = os.environ.get("EDITOR")
+ if editor:
+ openers = (editor,) + openers
+
+ import shutil
+ for opener in openers:
+ opener = shutil.which(opener)
+ if opener:
+ break
+ else:
+ log.warning("Unable to find a program to open '%s' with", path)
+ return 1
+
+ log.info("Running '%s %s'", opener, path)
+ retcode = util.Popen((opener, path)).wait()
+
+ if not retcode:
+ try:
+ with open(path, encoding="utf-8") as fp:
+ util.json_loads(fp.read())
+ except Exception as exc:
+ log.warning("%s when parsing '%s': %s",
+ exc.__class__.__name__, path, exc)
+ return 2
+
+ return retcode
+
+
+def status():
+ from .output import stdout_write
+
+ paths = []
+ for path in _default_configs:
+ path = util.expand_path(path)
+
+ try:
+ with open(path, encoding="utf-8") as fp:
+ util.json_loads(fp.read())
+ except FileNotFoundError:
+ status = "Not Present"
+ except OSError:
+ status = "Inaccessible"
+ except ValueError:
+ status = "Invalid JSON"
+ except Exception as exc:
+ log.debug(exc)
+ status = "Unknown"
+ else:
+ status = "OK"
+
+ paths.append((path, status))
+
+ fmt = "{{:<{}}} : {{}}\n".format(
+ max(len(p[0]) for p in paths)).format
+
+ for path, status in paths:
+ stdout_write(fmt(path, status))
+
+
def load(files=None, strict=False, loads=util.json_loads):
"""Load JSON configuration files"""
for pathfmt in files or _default_configs:
path = util.expand_path(pathfmt)
try:
- with open(path, encoding="utf-8") as file:
- conf = loads(file.read())
+ with open(path, encoding="utf-8") as fp:
+ conf = loads(fp.read())
except OSError as exc:
if strict:
log.error(exc)
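
Editor's note: the new status() pads each default config path on the right to the widest one, so the state column lines up; possible states are OK, Not Present, Inaccessible, Invalid JSON, and Unknown. A sketch of what `gallery-dl --config-status` might print on a Linux setup where only the user config exists (paths are illustrative and platform-dependent):

    /etc/gallery-dl.conf                      : Not Present
    /home/user/.config/gallery-dl/config.json : OK
    /home/user/.gallery-dl.conf               : Not Present
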
diff --git a/gallery_dl/cookies.py b/gallery_dl/cookies.py
index b4986c1..47f78a7 100644
--- a/gallery_dl/cookies.py
+++ b/gallery_dl/cookies.py
@@ -188,8 +188,8 @@ def _firefox_cookies_database(profile=None, container=None):
os.path.dirname(path), "containers.json")
try:
- with open(containers_path) as file:
- identities = util.json_loads(file.read())["identities"]
+ with open(containers_path) as fp:
+ identities = util.json_loads(fp.read())["identities"]
except OSError:
_log_error("Unable to read Firefox container database at '%s'",
containers_path)
@@ -745,8 +745,8 @@ def _get_windows_v10_key(browser_root):
_log_error("Unable to find Local State file")
return None
_log_debug("Found Local State file at '%s'", path)
- with open(path, encoding="utf-8") as file:
- data = util.json_loads(file.read())
+ with open(path, encoding="utf-8") as fp:
+ data = util.json_loads(fp.read())
try:
base64_key = data["os_crypt"]["encrypted_key"]
except KeyError:
diff --git a/gallery_dl/downloader/text.py b/gallery_dl/downloader/text.py
index c57fbd0..a668d62 100644
--- a/gallery_dl/downloader/text.py
+++ b/gallery_dl/downloader/text.py
@@ -18,8 +18,8 @@ class TextDownloader(DownloaderBase):
if self.part:
pathfmt.part_enable(self.partdir)
self.out.start(pathfmt.path)
- with pathfmt.open("wb") as file:
- file.write(url.encode()[5:])
+ with pathfmt.open("wb") as fp:
+ fp.write(url.encode()[5:])
return True
diff --git a/gallery_dl/downloader/ytdl.py b/gallery_dl/downloader/ytdl.py
index adada75..87e7756 100644
--- a/gallery_dl/downloader/ytdl.py
+++ b/gallery_dl/downloader/ytdl.py
@@ -89,6 +89,11 @@ class YoutubeDLDownloader(DownloaderBase):
formats = info_dict.get("requested_formats")
if formats and not compatible_formats(formats):
info_dict["ext"] = "mkv"
+ elif "ext" not in info_dict:
+ try:
+ info_dict["ext"] = info_dict["formats"][0]["ext"]
+ except LookupError:
+ info_dict["ext"] = "mp4"
if self.outtmpl:
self._set_outtmpl(ytdl_instance, self.outtmpl)
diff --git a/gallery_dl/extractor/__init__.py b/gallery_dl/extractor/__init__.py
index 591e6a8..6aff1f3 100644
--- a/gallery_dl/extractor/__init__.py
+++ b/gallery_dl/extractor/__init__.py
@@ -62,6 +62,7 @@ modules = [
"hentaifox",
"hentaihand",
"hentaihere",
+ "hentainexus",
"hiperdex",
"hitomi",
"hotleak",
@@ -113,7 +114,6 @@ modules = [
"paheal",
"patreon",
"philomena",
- "photobucket",
"photovogue",
"picarto",
"piczel",
diff --git a/gallery_dl/extractor/cien.py b/gallery_dl/extractor/cien.py
deleted file mode 100644
index a9ccab5..0000000
--- a/gallery_dl/extractor/cien.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2024 Mike Fährmann
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-"""Extractors for https://ci-en.net/"""
-
-from .common import Extractor, Message
-from .. import text, util
-
-BASE_PATTERN = r"(?:https?://)?ci-en\.(?:net|dlsite\.com)"
-
-
-class CienExtractor(Extractor):
- category = "cien"
- root = "https://ci-en.net"
-
- def __init__(self, match):
- self.root = text.root_from_url(match.group(0))
- Extractor.__init__(self, match)
-
- def _pagination_articles(self, url, params):
- data = {"extractor": CienArticleExtractor}
- params["page"] = text.parse_int(params.get("page"), 1)
-
- while True:
- page = self.request(url, params=params).text
-
- for card in text.extract_iter(
- page, ' class="c-cardCase-item', '</div>'):
- article_url = text.extr(card, ' href="', '"')
- yield Message.Queue, article_url, data
-
- if ' rel="next"' not in page:
- return
- params["page"] += 1
-
-
-class CienArticleExtractor(CienExtractor):
- subcategory = "article"
- pattern = BASE_PATTERN + r"/creator/(\d+)/article/(\d+)"
- example = "https://ci-en.net/creator/123/article/12345"
-
- def items(self):
- url = "{}/creator/{}/article/{}".format(
- self.root, self.groups[0], self.groups[1])
- page = self.request(url, notfound="article").text
- return
- yield 1
-
-
-class CienCreatorExtractor(CienExtractor):
- subcategory = "creator"
- pattern = BASE_PATTERN + r"/creator/(\d+)(?:/article(?:\?([^#]+))?)?/?$"
- example = "https://ci-en.net/creator/123"
-
- def items(self):
- url = "{}/creator/{}/article".format(self.root, self.groups[0])
- params = text.parse_query(self.groups[1])
- params["mode"] = "list"
- return self._pagination_articles(url, params)
-
-
-class CienRecentExtractor(CienExtractor):
- subcategory = "recent"
- pattern = BASE_PATTERN + r"/mypage/recent(?:\?([^#]+))?"
- example = "https://ci-en.net/mypage/recent"
-
- def items(self):
- url = self.root + "/mypage/recent"
- params = text.parse_query(self.groups[0])
- return self._pagination_articles(url, params)
-
-
-class CienFollowingExtractor(CienExtractor):
- subcategory = "following"
- pattern = BASE_PATTERN + r"/mypage/subscription(/following)?"
- example = "https://ci-en.net/mypage/subscription"
-
- def items(self):
- url = self.root + "/mypage/recent"
- params = text.parse_query(self.groups[0])
- return self._pagination_articles(url, params)
diff --git a/gallery_dl/extractor/common.py b/gallery_dl/extractor/common.py
index 8771261..d7a41bc 100644
--- a/gallery_dl/extractor/common.py
+++ b/gallery_dl/extractor/common.py
@@ -11,6 +11,7 @@
import os
import re
import ssl
+import sys
import time
import netrc
import queue
@@ -42,6 +43,7 @@ class Extractor():
browser = None
request_interval = 0.0
request_interval_min = 0.0
+ request_interval_429 = 60.0
request_timestamp = 0.0
def __init__(self, match):
@@ -202,7 +204,9 @@ class Extractor():
self.log.warning("Cloudflare CAPTCHA")
break
- if code == 429 and self._interval_429:
+ if code == 429 and self._handle_429(response):
+ continue
+ elif code == 429 and self._interval_429:
pass
elif code not in retry_codes and code < 500:
break
@@ -230,6 +234,8 @@ class Extractor():
raise exception.HttpError(msg, response)
+ _handle_429 = util.false
+
def wait(self, seconds=None, until=None, adjust=1.0,
reason="rate limit"):
now = time.time()
@@ -263,6 +269,8 @@ class Extractor():
time.sleep(seconds)
def input(self, prompt, echo=True):
+ self._check_input_allowed(prompt)
+
if echo:
try:
return input(prompt)
@@ -271,13 +279,30 @@ class Extractor():
else:
return getpass.getpass(prompt)
+ def _check_input_allowed(self, prompt=""):
+ input = self.config("input")
+
+ if input is None:
+ try:
+ input = sys.stdin.isatty()
+ except Exception:
+ input = False
+
+ if not input:
+ raise exception.StopExtraction(
+ "User input required (%s)", prompt.strip(" :"))
+
def _get_auth_info(self):
"""Return authentication information as (username, password) tuple"""
username = self.config("username")
password = None
if username:
- password = self.config("password") or util.LazyPrompt()
+ password = self.config("password")
+ if not password:
+ self._check_input_allowed("password")
+ password = util.LazyPrompt()
+
elif self.config("netrc", False):
try:
info = netrc.netrc().authenticators(self.category)
@@ -304,7 +329,7 @@ class Extractor():
self.request_interval_min,
)
self._interval_429 = util.build_duration_func(
- self.config("sleep-429", 60),
+ self.config("sleep-429", self.request_interval_429),
)
if self._retries < 0:
@@ -837,7 +862,7 @@ def _build_requests_adapter(ssl_options, ssl_ciphers, source_address):
if ssl_options or ssl_ciphers:
ssl_context = urllib3.connection.create_urllib3_context(
options=ssl_options or None, ciphers=ssl_ciphers)
- if requests.__version__ > "2.31":
+ if not requests.__version__ < "2.32":
# https://github.com/psf/requests/pull/6731
ssl_context.load_default_certs()
ssl_context.check_hostname = False
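
Editor's note: Extractor.request() now consults a _handle_429 hook (util.false by default) before falling back to the sleep-429 wait; a truthy return value makes the retry loop `continue` immediately. A minimal sketch of an override, with a hypothetical extractor and header name (the skeb.py hunk further down is the real in-tree user):

    # Hypothetical subclass, in the namespace of gallery_dl/extractor/common.py:
    # recover from HTTP 429 by storing a fresh token, then retry at once.
    class ExampleExtractor(Extractor):
        def _handle_429(self, response):
            token = response.headers.get("x-request-token")  # assumed header
            if token:
                self.cookies.set("token", token, domain="example.org")
                return True   # request() retries immediately
            return False      # fall through to the sleep-429 interval
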
diff --git a/gallery_dl/extractor/deviantart.py b/gallery_dl/extractor/deviantart.py
index 993885a..2199cc8 100644
--- a/gallery_dl/extractor/deviantart.py
+++ b/gallery_dl/extractor/deviantart.py
@@ -1730,15 +1730,16 @@ class DeviantartEclipseAPI():
url = "{}/{}/about".format(self.extractor.root, user)
page = self.request(url).text
- gruserid, pos = text.extract(page, ' data-userid="', '"')
+ gruser_id = text.extr(page, ' data-userid="', '"')
- pos = page.find('\\"type\\":\\"watching\\"', pos)
+ pos = page.find('\\"name\\":\\"watching\\"')
if pos < 0:
- raise exception.NotFoundError("module")
- moduleid = text.rextract(page, '\\"id\\":', ',', pos)[0].strip('" ')
+ raise exception.NotFoundError("'watching' module ID")
+ module_id = text.rextract(
+ page, '\\"id\\":', ',', pos)[0].strip('" ')
self._fetch_csrf_token(page)
- return gruserid, moduleid
+ return gruser_id, module_id
def _fetch_csrf_token(self, page=None):
if page is None:
diff --git a/gallery_dl/extractor/fanbox.py b/gallery_dl/extractor/fanbox.py
index 2223403..d81fd0b 100644
--- a/gallery_dl/extractor/fanbox.py
+++ b/gallery_dl/extractor/fanbox.py
@@ -113,7 +113,17 @@ class FanboxExtractor(Extractor):
post["user"] = self._get_user_data(post["creatorId"])
if self._meta_plan:
plans = self._get_plan_data(post["creatorId"])
- post["plan"] = plans[post["feeRequired"]]
+ fee = post["feeRequired"]
+ try:
+ post["plan"] = plans[fee]
+ except KeyError:
+ fees = [f for f in plans if f >= fee]
+ if fees:
+ plan = plans[min(fees)]
+ else:
+ plan = plans[0].copy()
+ plan["fee"] = fee
+ post["plan"] = plans[fee] = plan
return content_body, post
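
Editor's note: the plan lookup above no longer assumes an exact fee match; a missing fee resolves to the cheapest plan costing at least that much, or to a copy of the free plan with the fee patched in, and the result is cached back into `plans`. A standalone re-run of that logic with toy data (plan dicts abbreviated):

    plans = {0: {"fee": 0}, 500: {"fee": 500}}   # keyed by monthly fee

    def resolve(fee):
        try:
            return plans[fee]
        except KeyError:
            fees = [f for f in plans if f >= fee]
            if fees:
                plan = plans[min(fees)]      # cheapest plan at or above 'fee'
            else:
                plan = plans[0].copy()       # nothing above: patch free plan
                plan["fee"] = fee
            plans[fee] = plan                # cache for later posts
            return plan

    assert resolve(300)["fee"] == 500        # rounded up to the 500 plan
    assert resolve(900)["fee"] == 900        # synthesized from the free plan
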
diff --git a/gallery_dl/extractor/hentainexus.py b/gallery_dl/extractor/hentainexus.py
new file mode 100644
index 0000000..97b7844
--- /dev/null
+++ b/gallery_dl/extractor/hentainexus.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2019-2024 Mike Fährmann
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+"""Extractors for https://hentainexus.com/"""
+
+from .common import GalleryExtractor, Extractor, Message
+from .. import text, util
+import binascii
+
+
+class HentainexusGalleryExtractor(GalleryExtractor):
+ """Extractor for hentainexus galleries"""
+ category = "hentainexus"
+ root = "https://hentainexus.com"
+ pattern = (r"(?i)(?:https?://)?(?:www\.)?hentainexus\.com"
+ r"/(?:view|read)/(\d+)")
+ example = "https://hentainexus.com/view/12345"
+
+ def __init__(self, match):
+ self.gallery_id = match.group(1)
+ url = "{}/view/{}".format(self.root, self.gallery_id)
+ GalleryExtractor.__init__(self, match, url)
+
+ def metadata(self, page):
+ rmve = text.remove_html
+ extr = text.extract_from(page)
+ data = {
+ "gallery_id": text.parse_int(self.gallery_id),
+ "cover" : extr('"og:image" content="', '"'),
+ "title" : extr('<h1 class="title">', '</h1>'),
+ }
+
+ for key in ("Artist", "Book", "Circle", "Event", "Language",
+ "Magazine", "Parody", "Publisher", "Description"):
+ value = rmve(extr('viewcolumn">' + key + '</td>', '</td>'))
+ value, sep, rest = value.rpartition(" (")
+ data[key.lower()] = value if sep else rest
+
+ data["tags"] = tags = []
+ for k in text.extract_iter(page, '<a href="/?q=tag:', '"'):
+ tags.append(text.unquote(k).strip('"').replace("+", " "))
+
+ if not data["language"]:
+ data["language"] = "English"
+ data["lang"] = util.language_to_code(data["language"])
+
+ if "doujin" in data["tags"]:
+ data["type"] = "Doujinshi"
+ elif "illustration" in data["tags"]:
+ data["type"] = "Illustration"
+ else:
+ data["type"] = "Manga"
+ data["title_conventional"] = self._join_title(data)
+ return data
+
+ def images(self, _):
+ url = "{}/read/{}".format(self.root, self.gallery_id)
+ page = self.request(url).text
+ imgs = util.json_loads(self._decode(text.extr(
+ page, 'initReader("', '"')))
+
+ headers = None
+ if not self.config("original", True):
+ headers = {"Accept": "image/webp,*/*"}
+ for img in imgs:
+ img["_http_headers"] = headers
+
+ return [
+ (img["image"], img)
+ for img in imgs
+ ]
+
+ @staticmethod
+ def _decode(data):
+ # https://hentainexus.com/static/js/reader.min.js?r=22
+ hostname = "hentainexus.com"
+ primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53)
+ blob = list(binascii.a2b_base64(data))
+ for i in range(0, len(hostname)):
+ blob[i] = blob[i] ^ ord(hostname[i])
+
+ key = blob[0:64]
+
+ C = 0
+ for k in key:
+ C = C ^ k
+ for _ in range(8):
+ if C & 1:
+ C = C >> 1 ^ 0xc
+ else:
+ C = C >> 1
+ k = primes[C & 0x7]
+
+ x = 0
+ S = list(range(256))
+ for i in range(256):
+ x = (x + S[i] + key[i % len(key)]) % 256
+ S[i], S[x] = S[x], S[i]
+
+ result = ""
+ a = c = m = x = 0
+ for n in range(64, len(blob)):
+ a = (a + k) % 256
+ x = (c + S[(x + S[a]) % 256]) % 256
+ c = (c + a + S[a]) % 256
+
+ S[a], S[x] = S[x], S[a]
+ m = S[(x + S[(a + S[(m + c) % 256]) % 256]) % 256]
+ result += chr(blob[n] ^ m)
+
+ return result
+
+ @staticmethod
+ def _join_title(data):
+ event = data['event']
+ artist = data['artist']
+ circle = data['circle']
+ title = data['title']
+ parody = data['parody']
+ book = data['book']
+ magazine = data['magazine']
+
+ # a few galleries have a large number of artists or parodies,
+ # which get replaced with "Various" in the title string
+ if artist.count(',') >= 3:
+ artist = 'Various'
+ if parody.count(',') >= 3:
+ parody = 'Various'
+
+ jt = ''
+ if event:
+ jt += '({}) '.format(event)
+ if circle:
+ jt += '[{} ({})] '.format(circle, artist)
+ else:
+ jt += '[{}] '.format(artist)
+ jt += title
+ if parody.lower() != 'original work':
+ jt += ' ({})'.format(parody)
+ if book:
+ jt += ' ({})'.format(book)
+ if magazine:
+ jt += ' ({})'.format(magazine)
+ return jt
+
+
+class HentainexusSearchExtractor(Extractor):
+ """Extractor for hentainexus search results"""
+ category = "hentainexus"
+ subcategory = "search"
+ root = "https://hentainexus.com"
+ pattern = (r"(?i)(?:https?://)?(?:www\.)?hentainexus\.com"
+ r"(?:/page/\d+)?/?(?:\?(q=[^/?#]+))?$")
+ example = "https://hentainexus.com/?q=QUERY"
+
+ def items(self):
+ params = text.parse_query(self.groups[0])
+ data = {"_extractor": HentainexusGalleryExtractor}
+ path = "/"
+
+ while path:
+ page = self.request(self.root + path, params=params).text
+ extr = text.extract_from(page)
+
+ while True:
+ gallery_id = extr('<a href="/view/', '"')
+ if not gallery_id:
+ break
+ yield Message.Queue, self.root + "/view/" + gallery_id, data
+
+ path = extr('class="pagination-next" href="', '"')
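
Editor's note: _decode() mirrors the site's reader script: base64-decode, XOR the leading bytes with the hostname, take bytes 0-63 as key material, derive a prime step `k` from a small CRC-style loop, then run a modified RC4 over the remainder. A sketch of just the keystream stage, assuming `key` and `k` were derived as above:

    # Sketch: the RC4 variant from _decode(), isolated for readability.
    # 'key' is the 64-byte unmasked header, 'k' the derived prime step.
    def keystream(key, k, length):
        S = list(range(256))
        x = 0
        for i in range(256):                 # key scheduling, as in RC4
            x = (x + S[i] + key[i % len(key)]) % 256
            S[i], S[x] = S[x], S[i]
        a = c = m = x = 0
        for _ in range(length):              # output stage with extra 'c' state
            a = (a + k) % 256
            x = (c + S[(x + S[a]) % 256]) % 256
            c = (c + a + S[a]) % 256
            S[a], S[x] = S[x], S[a]
            m = S[(x + S[(a + S[(m + c) % 256]) % 256]) % 256]
            yield m                          # XORed with each payload byte
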
diff --git a/gallery_dl/extractor/hitomi.py b/gallery_dl/extractor/hitomi.py
index 88f5708..9b74700 100644
--- a/gallery_dl/extractor/hitomi.py
+++ b/gallery_dl/extractor/hitomi.py
@@ -58,6 +58,7 @@ class HitomiGalleryExtractor(GalleryExtractor):
return {
"gallery_id": text.parse_int(info["id"]),
"title" : info["title"],
+ "title_jpn" : info.get("japanese_title") or "",
"type" : info["type"].capitalize(),
"language" : language,
"lang" : util.language_to_code(language),
diff --git a/gallery_dl/extractor/instagram.py b/gallery_dl/extractor/instagram.py
index 9c2b1de..f7a5cc7 100644
--- a/gallery_dl/extractor/instagram.py
+++ b/gallery_dl/extractor/instagram.py
@@ -168,6 +168,7 @@ class InstagramExtractor(Extractor):
"likes": post.get("like_count", 0),
"pinned": post.get("timeline_pinned_user_ids", ()),
"date": text.parse_timestamp(post.get("taken_at")),
+ "liked": post.get("has_liked", False),
}
caption = post["caption"]
@@ -270,6 +271,7 @@ class InstagramExtractor(Extractor):
"typename" : typename,
"date" : text.parse_timestamp(post["taken_at_timestamp"]),
"likes" : post["edge_media_preview_like"]["count"],
+ "liked" : post.get("viewer_has_liked", False),
"pinned" : pinned,
"owner_id" : owner["id"],
"username" : owner.get("username"),
diff --git a/gallery_dl/extractor/kemonoparty.py b/gallery_dl/extractor/kemonoparty.py
index b0c24de..6f2d5f3 100644
--- a/gallery_dl/extractor/kemonoparty.py
+++ b/gallery_dl/extractor/kemonoparty.py
@@ -518,7 +518,8 @@ class KemonopartyFavoriteExtractor(KemonopartyExtractor):
if not sort:
sort = "updated"
- users.sort(key=lambda x: x[sort], reverse=(order == "desc"))
+ users.sort(key=lambda x: x[sort] or util.NONE,
+ reverse=(order == "desc"))
for user in users:
user["_extractor"] = KemonopartyUserExtractor
@@ -532,7 +533,8 @@ class KemonopartyFavoriteExtractor(KemonopartyExtractor):
if not sort:
sort = "faved_seq"
- posts.sort(key=lambda x: x[sort], reverse=(order == "desc"))
+ posts.sort(key=lambda x: x[sort] or util.NONE,
+ reverse=(order == "desc"))
for post in posts:
post["_extractor"] = KemonopartyPostExtractor
diff --git a/gallery_dl/extractor/newgrounds.py b/gallery_dl/extractor/newgrounds.py
index 7ac3a3a..ecd6619 100644
--- a/gallery_dl/extractor/newgrounds.py
+++ b/gallery_dl/extractor/newgrounds.py
@@ -12,6 +12,7 @@ from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache
import itertools
+import re
class NewgroundsExtractor(Extractor):
@@ -33,10 +34,16 @@ class NewgroundsExtractor(Extractor):
def _init(self):
self.flash = self.config("flash", True)
- fmt = self.config("format", "original")
- self.format = (True if not fmt or fmt == "original" else
- fmt if isinstance(fmt, int) else
- text.parse_int(fmt.rstrip("p")))
+ fmt = self.config("format")
+ if not fmt or fmt == "original":
+ self.format = ("mp4", "webm", "m4v", "mov", "mkv",
+ 1080, 720, 360)
+ elif isinstance(fmt, (list, tuple)):
+ self.format = fmt
+ else:
+ self._video_formats = self._video_formats_limit
+ self.format = (fmt if isinstance(fmt, int) else
+ text.parse_int(fmt.rstrip("p")))
def items(self):
self.login()
@@ -266,7 +273,7 @@ class NewgroundsExtractor(Extractor):
if src:
src = src.replace("\\/", "/")
- fallback = ()
+ formats = ()
date = text.parse_datetime(extr(
'itemprop="datePublished" content="', '"'))
else:
@@ -276,23 +283,8 @@ class NewgroundsExtractor(Extractor):
"X-Requested-With": "XMLHttpRequest",
}
sources = self.request(url, headers=headers).json()["sources"]
-
- if self.format is True:
- src = sources["360p"][0]["src"].replace(".360p.", ".")
- formats = sources
- else:
- formats = []
- for fmt, src in sources.items():
- width = text.parse_int(fmt.rstrip("p"))
- if width <= self.format:
- formats.append((width, src))
- if formats:
- formats.sort(reverse=True)
- src, formats = formats[0][1][0]["src"], formats[1:]
- else:
- src = ""
-
- fallback = self._video_fallback(formats)
+ formats = self._video_formats(sources)
+ src = next(formats, "")
date = text.parse_timestamp(src.rpartition("?")[2])
return {
@@ -306,15 +298,33 @@ class NewgroundsExtractor(Extractor):
"rating" : extr('class="rated-', '"'),
"index" : text.parse_int(index),
"_index" : index,
- "_fallback" : fallback,
+ "_fallback" : formats,
}
- @staticmethod
- def _video_fallback(formats):
- if isinstance(formats, dict):
- formats = list(formats.items())
- formats.sort(key=lambda fmt: text.parse_int(fmt[0].rstrip("p")),
- reverse=True)
+ def _video_formats(self, sources):
+ src = sources["360p"][0]["src"]
+ sub = re.compile(r"\.360p\.\w+").sub
+
+ for fmt in self.format:
+ try:
+ if isinstance(fmt, int):
+ yield sources[str(fmt) + "p"][0]["src"]
+ elif fmt in sources:
+ yield sources[fmt][0]["src"]
+ else:
+ yield sub("." + fmt, src, 1)
+ except Exception as exc:
+ self.log.debug("Video format '%s' not available (%s: %s)",
+ fmt, exc.__class__.__name__, exc)
+
+ def _video_formats_limit(self, sources):
+ formats = []
+ for fmt, src in sources.items():
+ width = text.parse_int(fmt.rstrip("p"))
+ if width <= self.format:
+ formats.append((width, src))
+
+ formats.sort(reverse=True)
for fmt in formats:
yield fmt[1][0]["src"]
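
Editor's note: the `format` option is now a preference list tried in order: integers select an exact "<N>p" entry, known strings are taken from `sources` directly, and anything else is substituted into the 360p URL; a bare integer or "720p"-style value instead routes through _video_formats_limit(). A toy walk-through of the list path (URLs illustrative):

    import re

    sources = {"360p": [{"src": "https://example.net/v.360p.mp4?17"}],
               "720p": [{"src": "https://example.net/v.720p.mp4?17"}]}

    src = sources["360p"][0]["src"]
    sub = re.compile(r"\.360p\.\w+").sub

    for fmt in ("mp4", 720, "webm"):                   # a preference list
        if isinstance(fmt, int):
            print(sources[str(fmt) + "p"][0]["src"])   # exact "<N>p" entry
        elif fmt in sources:
            print(sources[fmt][0]["src"])
        else:
            print(sub("." + fmt, src, 1))              # rewrite the 360p URL
    # -> .../v.mp4?17, .../v.720p.mp4?17, .../v.webm?17
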
diff --git a/gallery_dl/extractor/nijie.py b/gallery_dl/extractor/nijie.py
index c50c013..60cca22 100644
--- a/gallery_dl/extractor/nijie.py
+++ b/gallery_dl/extractor/nijie.py
@@ -56,7 +56,7 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
data["user_id"] = data["artist_id"]
data["user_name"] = data["artist_name"]
- urls = list(self._extract_images(image_id, page))
+ urls = self._extract_images(image_id, page)
data["count"] = len(urls)
yield Message.Directory, data
@@ -113,11 +113,14 @@ class NijieExtractor(AsynchronousMixin, BaseExtractor):
# multiple images
url = "{}/view_popup.php?id={}".format(self.root, image_id)
page = self.request(url).text
- yield from text.extract_iter(
- page, 'href="javascript:void(0);"><img src="', '"')
+ return [
+ text.extr(media, ' src="', '"')
+ for media in text.extract_iter(
+ page, 'href="javascript:void(0);"><', '>')
+ ]
else:
pos = page.find('id="view-center"') + 1
- yield text.extract(page, 'itemprop="image" src="', '"', pos)[0]
+            return (text.extract(page, 'itemprop="image" src="', '"', pos)[0],)
@staticmethod
def _extract_user_name(page):
diff --git a/gallery_dl/extractor/nitter.py b/gallery_dl/extractor/nitter.py
index 2bce597..cfc8861 100644
--- a/gallery_dl/extractor/nitter.py
+++ b/gallery_dl/extractor/nitter.py
@@ -234,26 +234,6 @@ class NitterExtractor(BaseExtractor):
BASE_PATTERN = NitterExtractor.update({
- "nitter.net": {
- "root": "https://nitter.net",
- "pattern": r"nitter\.net",
- },
- "nitter.1d4.us": {
- "root": "https://nitter.1d4.us",
- "pattern": r"nitter\.1d4\.us",
- },
- "nitter.kavin.rocks": {
- "root": "https://nitter.kavin.rocks",
- "pattern": r"nitter\.kavin\.rocks",
- },
- "nitter.unixfox.eu": {
- "root": "https://nitter.unixfox.eu",
- "pattern": r"nitter\.unixfox\.eu",
- },
- "nitter.it": {
- "root": "https://nitter.it",
- "pattern": r"nitter\.it",
- },
})
USER_PATTERN = BASE_PATTERN + r"/(i(?:/user/|d:)(\d+)|[^/?#]+)"
diff --git a/gallery_dl/extractor/oauth.py b/gallery_dl/extractor/oauth.py
index 5571575..9d025d5 100644
--- a/gallery_dl/extractor/oauth.py
+++ b/gallery_dl/extractor/oauth.py
@@ -424,7 +424,7 @@ class OAuthPixiv(OAuthBase):
"code_challenge_method": "S256",
"client": "pixiv-android",
}
- code = self.open(url, params, self._input)
+ code = self.open(url, params, self._input_code)
url = "https://oauth.secure.pixiv.net/auth/token"
headers = {
@@ -459,7 +459,7 @@ class OAuthPixiv(OAuthBase):
stdout_write(self._generate_message(("refresh-token",), (token,)))
- def _input(self):
+ def _input_code(self):
stdout_write("""\
1) Open your browser's Developer Tools (F12) and switch to the Network tab
2) Login
@@ -471,5 +471,5 @@ class OAuthPixiv(OAuthBase):
like the entire URL or several query parameters.
""")
- code = input("code: ")
+ code = self.input("code: ")
return code.rpartition("=")[2].strip()
diff --git a/gallery_dl/extractor/philomena.py b/gallery_dl/extractor/philomena.py
index 339646f..150efed 100644
--- a/gallery_dl/extractor/philomena.py
+++ b/gallery_dl/extractor/philomena.py
@@ -24,8 +24,13 @@ class PhilomenaExtractor(BooruExtractor):
def _init(self):
self.api = PhilomenaAPI(self)
+ if not self.config("svg", True):
+ self._file_url = operator.itemgetter("view_url")
- _file_url = operator.itemgetter("view_url")
+ def _file_url(self, post):
+ if post["format"] == "svg":
+ return post["view_url"].rpartition(".")[0] + ".svg"
+ return post["view_url"]
@staticmethod
def _prepare(post):
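
Editor's note: with the `svg` option left at its default, the new _file_url() rewrites the rasterized view_url back to the original vector file; setting "svg": false restores the plain view_url via itemgetter. A one-line demonstration of the rewrite (URL illustrative):

    post = {"format": "svg",
            "view_url": "https://cdn.example.org/img/view/0/1/2/123.png"}
    url = (post["view_url"].rpartition(".")[0] + ".svg"
           if post["format"] == "svg" else post["view_url"])
    print(url)   # https://cdn.example.org/img/view/0/1/2/123.svg
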
diff --git a/gallery_dl/extractor/photobucket.py b/gallery_dl/extractor/photobucket.py
deleted file mode 100644
index a01c9fe..0000000
--- a/gallery_dl/extractor/photobucket.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2019-2023 Mike Fährmann
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-"""Extractors for https://photobucket.com/"""
-
-from .common import Extractor, Message
-from .. import text, exception
-import binascii
-import json
-
-
-class PhotobucketAlbumExtractor(Extractor):
- """Extractor for albums on photobucket.com"""
- category = "photobucket"
- subcategory = "album"
- directory_fmt = ("{category}", "{username}", "{location}")
- filename_fmt = "{offset:>03}{pictureId:?_//}_{titleOrFilename}.{extension}"
- archive_fmt = "{id}"
- pattern = (r"(?:https?://)?((?:[\w-]+\.)?photobucket\.com)"
- r"/user/[^/?&#]+/library(?:/[^?&#]*)?")
- example = "https://s123.photobucket.com/user/USER/library"
-
- def __init__(self, match):
- self.root = "https://" + match.group(1)
- Extractor.__init__(self, match)
-
- def _init(self):
- self.session.headers["Referer"] = self.url
-
- def items(self):
- for image in self.images():
- image["titleOrFilename"] = text.unescape(image["titleOrFilename"])
- image["title"] = text.unescape(image["title"])
- image["extension"] = image["ext"]
- yield Message.Directory, image
- yield Message.Url, image["fullsizeUrl"], image
-
- if self.config("subalbums", True):
- for album in self.subalbums():
- album["_extractor"] = PhotobucketAlbumExtractor
- yield Message.Queue, album["url"], album
-
- def images(self):
- """Yield all images of the current album"""
- url = self.url
- params = {"sort": "3", "page": 1}
-
- while True:
- page = self.request(url, params=params).text
- json_data = text.extract(page, "collectionData:", ",\n")[0]
- if not json_data:
- msg = text.extr(page, 'libraryPrivacyBlock">', "</div>")
- msg = ' ("{}")'.format(text.remove_html(msg)) if msg else ""
- self.log.error("Unable to get JSON data%s", msg)
- return
- data = json.loads(json_data)
-
- yield from data["items"]["objects"]
-
- if data["total"] <= data["offset"] + data["pageSize"]:
- self.album_path = data["currentAlbumPath"]
- return
- params["page"] += 1
-
- def subalbums(self):
- """Return all subalbum objects"""
- url = self.root + "/component/Albums-SubalbumList"
- params = {
- "albumPath": self.album_path,
- "fetchSubAlbumsOnly": "true",
- "deferCollapsed": "true",
- "json": "1",
- }
-
- data = self.request(url, params=params).json()
- return data["body"].get("subAlbums", ())
-
-
-class PhotobucketImageExtractor(Extractor):
- """Extractor for individual images from photobucket.com"""
- category = "photobucket"
- subcategory = "image"
- directory_fmt = ("{category}", "{username}")
- filename_fmt = "{pictureId:?/_/}{titleOrFilename}.{extension}"
- archive_fmt = "{username}_{id}"
- pattern = (r"(?:https?://)?(?:[\w-]+\.)?photobucket\.com"
- r"(?:/gallery/user/([^/?&#]+)/media/([^/?&#]+)"
- r"|/user/([^/?&#]+)/media/[^?&#]+\.html)")
- example = "https://s123.photobucket.com/user/USER/media/NAME.EXT.html"
-
- def __init__(self, match):
- Extractor.__init__(self, match)
- self.user = match.group(1) or match.group(3)
- self.media_id = match.group(2)
-
- def _init(self):
- self.session.headers["Referer"] = self.url
-
- def items(self):
- url = "https://photobucket.com/galleryd/search.php"
- params = {"userName": self.user, "searchTerm": "", "ref": ""}
-
- if self.media_id:
- params["mediaId"] = self.media_id
- else:
- params["url"] = self.url
-
- # retry API call up to 5 times, since it can randomly fail
- tries = 0
- while tries < 5:
- data = self.request(url, method="POST", params=params).json()
- image = data["mediaDocuments"]
- if "message" not in image:
- break # success
- tries += 1
- self.log.debug(image["message"])
- else:
- raise exception.StopExtraction(image["message"])
-
- # adjust metadata entries to be at least somewhat similar
- # to what the 'album' extractor provides
- if "media" in image:
- image = image["media"][image["mediaIndex"]]
- image["albumView"] = data["mediaDocuments"]["albumView"]
- image["username"] = image["ownerId"]
- else:
- image["fileUrl"] = image.pop("imageUrl")
-
- image.setdefault("title", "")
- image.setdefault("description", "")
- name, _, ext = image["fileUrl"].rpartition("/")[2].rpartition(".")
- image["ext"] = image["extension"] = ext
- image["titleOrFilename"] = image["title"] or name
- image["tags"] = image.pop("clarifaiTagList", [])
-
- mtype, _, mid = binascii.a2b_base64(image["id"]).partition(b":")
- image["pictureId"] = mid.decode() if mtype == b"mediaId" else ""
-
- yield Message.Directory, image
- yield Message.Url, image["fileUrl"], image
diff --git a/gallery_dl/extractor/shimmie2.py b/gallery_dl/extractor/shimmie2.py
index 67f38c4..a68f0db 100644
--- a/gallery_dl/extractor/shimmie2.py
+++ b/gallery_dl/extractor/shimmie2.py
@@ -92,6 +92,10 @@ BASE_PATTERN = Shimmie2Extractor.update({
"root": "https://rule34hentai.net",
"pattern": r"rule34hentai\.net",
},
+ "vidyapics": {
+ "root": "https://vidya.pics",
+ "pattern": r"vidya\.pics",
+ },
}) + r"/(?:index\.php\?q=/?)?"
diff --git a/gallery_dl/extractor/skeb.py b/gallery_dl/extractor/skeb.py
index 38a2d16..6ec44ba 100644
--- a/gallery_dl/extractor/skeb.py
+++ b/gallery_dl/extractor/skeb.py
@@ -7,7 +7,7 @@
"""Extractors for https://skeb.jp/"""
from .common import Extractor, Message
-from .. import text, exception
+from .. import text
import itertools
@@ -31,14 +31,15 @@ class SkebExtractor(Extractor):
if "Authorization" not in self.session.headers:
self.headers["Authorization"] = "Bearer null"
- def request(self, url, **kwargs):
- while True:
- try:
- return Extractor.request(self, url, **kwargs)
- except exception.HttpError as exc:
- if exc.status == 429 and "request_key" in exc.response.cookies:
- continue
- raise
+ def _handle_429(self, response):
+ if "request_key" in response.cookies:
+ return True
+
+ request_key = text.extr(
+ response.text, "request_key=", ";")
+ if request_key:
+ self.cookies.set("request_key", request_key, domain="skeb.jp")
+ return True
def items(self):
metadata = self.metadata()
diff --git a/gallery_dl/extractor/speakerdeck.py b/gallery_dl/extractor/speakerdeck.py
index e44fdae..3210fd8 100644
--- a/gallery_dl/extractor/speakerdeck.py
+++ b/gallery_dl/extractor/speakerdeck.py
@@ -8,45 +8,35 @@
"""Extractors for https://speakerdeck.com/"""
-from .common import Extractor, Message
+from .common import GalleryExtractor
from .. import text
+import re
-class SpeakerdeckPresentationExtractor(Extractor):
+class SpeakerdeckPresentationExtractor(GalleryExtractor):
"""Extractor for images from a presentation on speakerdeck.com"""
category = "speakerdeck"
subcategory = "presentation"
directory_fmt = ("{category}", "{user}")
filename_fmt = "{presentation}-{num:>02}.{extension}"
archive_fmt = "{presentation}_{num}"
+ root = "https://speakerdeck.com"
pattern = r"(?:https?://)?(?:www\.)?speakerdeck\.com/([^/?#]+)/([^/?#]+)"
example = "https://speakerdeck.com/USER/PRESENTATION"
def __init__(self, match):
- Extractor.__init__(self, match)
+ GalleryExtractor.__init__(self, match, "")
self.user, self.presentation = match.groups()
- self.presentation_id = None
-
- def items(self):
- data = self.get_job_metadata()
- imgs = self.get_image_urls()
- data["count"] = len(imgs)
- yield Message.Directory, data
- for data["num"], url in enumerate(imgs, 1):
- yield Message.Url, url, text.nameext_from_url(url, data)
-
- def get_job_metadata(self):
- """Collect metadata for extractor-job"""
- url = "https://speakerdeck.com/oembed.json"
+
+ def metadata(self, _):
+ url = self.root + "/oembed.json"
params = {
- "url": "https://speakerdeck.com/" + self.user +
- "/" + self.presentation,
+ "url": "{}/{}/{}".format(self.root, self.user, self.presentation),
}
-
data = self.request(url, params=params).json()
- self.presentation_id, pos = \
- text.extract(data["html"], 'src="//speakerdeck.com/player/', '"')
+ self.presentation_id = text.extr(
+ data["html"], 'src="//speakerdeck.com/player/', '"')
return {
"user": self.user,
@@ -56,8 +46,10 @@ class SpeakerdeckPresentationExtractor(Extractor):
"author": data["author_name"],
}
- def get_image_urls(self):
- """Extract and return a list of all image-urls"""
- page = self.request("https://speakerdeck.com/player/" +
- self.presentation_id).text
- return list(text.extract_iter(page, 'js-sd-slide" data-url="', '"'))
+ def images(self, _):
+ url = "{}/player/{}".format(self.root, self.presentation_id)
+ page = re.sub(r"\s+", " ", self.request(url).text)
+ return [
+ (url, None)
+ for url in text.extract_iter(page, 'js-sd-slide" data-url="', '"')
+ ]
diff --git a/gallery_dl/extractor/szurubooru.py b/gallery_dl/extractor/szurubooru.py
index 08cccab..bba1ece 100644
--- a/gallery_dl/extractor/szurubooru.py
+++ b/gallery_dl/extractor/szurubooru.py
@@ -98,13 +98,13 @@ class SzurubooruTagExtractor(SzurubooruExtractor):
subcategory = "tag"
directory_fmt = ("{category}", "{search_tags}")
archive_fmt = "t_{search_tags}_{id}_{version}"
- pattern = BASE_PATTERN + r"/posts/query=([^/?#]+)"
+ pattern = BASE_PATTERN + r"/posts(?:/query=([^/?#]*))?"
example = "https://booru.foalcon.com/posts/query=TAG"
def __init__(self, match):
SzurubooruExtractor.__init__(self, match)
- query = match.group(match.lastindex)
- self.query = text.unquote(query.replace("+", " "))
+ query = self.groups[-1]
+ self.query = text.unquote(query.replace("+", " ")) if query else ""
def metadata(self):
return {"search_tags": self.query}
@@ -119,9 +119,5 @@ class SzurubooruPostExtractor(SzurubooruExtractor):
pattern = BASE_PATTERN + r"/post/(\d+)"
example = "https://booru.foalcon.com/post/12345"
- def __init__(self, match):
- SzurubooruExtractor.__init__(self, match)
- self.post_id = match.group(match.lastindex)
-
def posts(self):
- return (self._api_request("/post/" + self.post_id),)
+ return (self._api_request("/post/" + self.groups[-1]),)
diff --git a/gallery_dl/extractor/tcbscans.py b/gallery_dl/extractor/tcbscans.py
index de6f3ee..71431ad 100644
--- a/gallery_dl/extractor/tcbscans.py
+++ b/gallery_dl/extractor/tcbscans.py
@@ -4,19 +4,23 @@
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
-"""Extractors for https://tcbscans.com/"""
+"""Extractors for https://tcbscans.me/"""
from .common import ChapterExtractor, MangaExtractor
from .. import text
-BASE_PATTERN = r"(?:https?://)?(?:tcbscans|onepiecechapters)\.com"
+BASE_PATTERN = (r"(?:https?://)?(?:tcb(?:-backup\.bihar-mirchi|scans)"
+ r"|onepiecechapters)\.(?:com|me)")
class TcbscansChapterExtractor(ChapterExtractor):
category = "tcbscans"
- root = "https://tcbscans.com"
pattern = BASE_PATTERN + r"(/chapters/\d+/[^/?#]+)"
- example = "https://tcbscans.com/chapters/12345/MANGA-chapter-123"
+ example = "https://tcbscans.me/chapters/12345/MANGA-chapter-123"
+
+ def __init__(self, match):
+ self.root = text.root_from_url(match.group(0))
+ ChapterExtractor.__init__(self, match)
def images(self, page):
return [
@@ -39,10 +43,13 @@ class TcbscansChapterExtractor(ChapterExtractor):
class TcbscansMangaExtractor(MangaExtractor):
category = "tcbscans"
- root = "https://tcbscans.com"
chapterclass = TcbscansChapterExtractor
pattern = BASE_PATTERN + r"(/mangas/\d+/[^/?#]+)"
- example = "https://tcbscans.com/mangas/123/MANGA"
+ example = "https://tcbscans.me/mangas/123/MANGA"
+
+ def __init__(self, match):
+ self.root = text.root_from_url(match.group(0))
+ MangaExtractor.__init__(self, match)
def chapters(self, page):
data = {
diff --git a/gallery_dl/extractor/twibooru.py b/gallery_dl/extractor/twibooru.py
index f57f479..a725a2c 100644
--- a/gallery_dl/extractor/twibooru.py
+++ b/gallery_dl/extractor/twibooru.py
@@ -28,8 +28,13 @@ class TwibooruExtractor(BooruExtractor):
def _init(self):
self.api = TwibooruAPI(self)
+ if not self.config("svg", True):
+ self._file_url = operator.itemgetter("view_url")
- _file_url = operator.itemgetter("view_url")
+ def _file_url(self, post):
+ if post["format"] == "svg":
+ return post["view_url"].rpartition(".")[0] + ".svg"
+ return post["view_url"]
@staticmethod
def _prepare(post):
diff --git a/gallery_dl/extractor/twitter.py b/gallery_dl/extractor/twitter.py
index ff77828..ec098aa 100644
--- a/gallery_dl/extractor/twitter.py
+++ b/gallery_dl/extractor/twitter.py
@@ -36,6 +36,7 @@ class TwitterExtractor(Extractor):
self.user = match.group(1)
def _init(self):
+ self.unavailable = self.config("unavailable", False)
self.textonly = self.config("text-tweets", False)
self.retweets = self.config("retweets", False)
self.replies = self.config("replies", True)
@@ -143,6 +144,15 @@ class TwitterExtractor(Extractor):
def _extract_media(self, tweet, entities, files):
for media in entities:
+
+ if "ext_media_availability" in media:
+ ext = media["ext_media_availability"]
+ if ext.get("status") == "Unavailable":
+ self.log.warning("Media unavailable (%s - '%s')",
+ tweet["id_str"], ext.get("reason"))
+ if not self.unavailable:
+ continue
+
descr = media.get("ext_alt_text")
width = media["original_info"].get("width", 0)
height = media["original_info"].get("height", 0)
@@ -1709,11 +1719,16 @@ class TwitterAPI():
variables["cursor"] = cursor
def _handle_ratelimit(self, response):
- if self.extractor.config("ratelimit") == "abort":
+ rl = self.extractor.config("ratelimit")
+ if rl == "abort":
raise exception.StopExtraction("Rate limit exceeded")
-
- until = response.headers.get("x-rate-limit-reset")
- self.extractor.wait(until=until, seconds=None if until else 60)
+ elif rl and isinstance(rl, str) and rl.startswith("wait:"):
+ until = None
+ seconds = text.parse_float(rl.partition(":")[2]) or 60.0
+ else:
+ until = response.headers.get("x-rate-limit-reset")
+ seconds = None if until else 60.0
+ self.extractor.wait(until=until, seconds=seconds)
def _process_tombstone(self, entry, tombstone):
text = (tombstone.get("richText") or tombstone["text"])["text"]
@@ -1849,7 +1864,7 @@ def _login_impl(extr, username, password):
},
}
elif subtask == "LoginEnterAlternateIdentifierSubtask":
- alt = extr.input(
+            alt = extr.config("username-alt") or extr.input(
"Alternate Identifier (username, email, phone number): ")
data = {
"enter_text": {
@@ -1881,8 +1896,9 @@ def _login_impl(extr, username, password):
raise exception.AuthenticationError("Login requires CAPTCHA")
elif subtask == "DenyLoginSubtask":
raise exception.AuthenticationError("Login rejected as suspicious")
- elif subtask == "ArkoseLogin":
- raise exception.AuthenticationError("No auth token cookie")
+ elif subtask == "LoginSuccessSubtask":
+ raise exception.AuthenticationError(
+ "No 'auth_token' cookie received")
else:
raise exception.StopExtraction("Unrecognized subtask %s", subtask)
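
Editor's note: besides "abort", the `ratelimit` option now understands "wait:N"; everything after the colon is parsed as a float, falling back to 60 seconds when empty or unparsable. A quick check of that parsing rule:

    from gallery_dl import text

    for rl in ("wait:10", "wait:2.5", "wait:"):
        seconds = text.parse_float(rl.partition(":")[2]) or 60.0
        print(rl, "->", seconds)   # 10.0, 2.5, 60.0 (empty falls back)
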
diff --git a/gallery_dl/extractor/vichan.py b/gallery_dl/extractor/vichan.py
index 79d7916..654c451 100644
--- a/gallery_dl/extractor/vichan.py
+++ b/gallery_dl/extractor/vichan.py
@@ -22,10 +22,6 @@ BASE_PATTERN = VichanExtractor.update({
"root": "https://8kun.top",
"pattern": r"8kun\.top",
},
- "wikieat": {
- "root": "https://wikieat.club",
- "pattern": r"wikieat\.club",
- },
"smugloli": {
"root": None,
"pattern": r"smuglo(?:\.li|li\.net)",
diff --git a/gallery_dl/formatter.py b/gallery_dl/formatter.py
index 0b212d5..ec1c926 100644
--- a/gallery_dl/formatter.py
+++ b/gallery_dl/formatter.py
@@ -325,6 +325,26 @@ def _parse_slice(format_spec, default):
return apply_slice
+def _parse_conversion(format_spec, default):
+    conversions, _, format_spec = format_spec.partition(_SEPARATOR)
+    convs = [_CONVERSIONS[c] for c in conversions[1:]]
+    fmt = _build_format_func(format_spec, default)
+
+    if len(conversions) <= 2:
+        conv = _CONVERSIONS[conversions[1]]
+
+        def convert_one(obj):
+            return fmt(conv(obj))
+        return convert_one
+
+    def convert_many(obj):
+        for conv in convs:
+            obj = conv(obj)
+        return fmt(obj)
+    return convert_many
+
+
def _parse_maxlen(format_spec, default):
maxlen, replacement, format_spec = format_spec.split(_SEPARATOR, 2)
maxlen = text.parse_int(maxlen[1:])
@@ -403,6 +423,19 @@ def _parse_sort(format_spec, default):
return sort_asc
+def _parse_limit(format_spec, default):
+ limit, hint, format_spec = format_spec.split(_SEPARATOR, 2)
+ limit = int(limit[1:])
+ limit_hint = limit - len(hint)
+ fmt = _build_format_func(format_spec, default)
+
+ def apply_limit(obj):
+ if len(obj) > limit:
+ obj = obj[:limit_hint] + hint
+ return fmt(obj)
+ return apply_limit
+
+
def _default_format(format_spec, default):
def wrap(obj):
return format(obj, format_spec)
@@ -447,10 +480,12 @@ _CONVERSIONS = {
_FORMAT_SPECIFIERS = {
"?": _parse_optional,
"[": _parse_slice,
+ "C": _parse_conversion,
"D": _parse_datetime,
- "L": _parse_maxlen,
"J": _parse_join,
+ "L": _parse_maxlen,
"O": _parse_offset,
"R": _parse_replace,
"S": _parse_sort,
+ "X": _parse_limit,
}
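
Editor's note: the specifier table gains `C` (apply one or more conversions before formatting) and `X` (truncate to a length, appending a hint string). A usage sketch matching the tests added further down:

    from gallery_dl import formatter

    kwdict = {"title": "hElLo wOrLd"}

    fmt = formatter.parse("{title:Cl}")       # 'C' + conversion chain: lower
    print(fmt.format_map(kwdict))             # hello world

    fmt = formatter.parse("{title:X10/ */}")  # limit to 10 chars, hint ' *'
    print(fmt.format_map(kwdict))             # hElLo wO *
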
diff --git a/gallery_dl/option.py b/gallery_dl/option.py
index 12622d0..f31d5ac 100644
--- a/gallery_dl/option.py
+++ b/gallery_dl/option.py
@@ -173,28 +173,6 @@ def build_parser():
action="version", version=version.__version__,
help="Print program version and exit",
)
- if util.EXECUTABLE:
- general.add_argument(
- "-U", "--update",
- dest="update", action="store_const", const="latest",
- help="Update to the latest version",
- )
- general.add_argument(
- "--update-to",
- dest="update", metavar="[CHANNEL@]TAG",
- help="Upgrade/downgrade to a specific version",
- )
- general.add_argument(
- "--update-check",
- dest="update", action="store_const", const="check",
- help="Check if a newer version is available",
- )
- else:
- general.add_argument(
- "-U", "--update-check",
- dest="update", action="store_const", const="check",
- help="Check if a newer version is available",
- )
general.add_argument(
"-f", "--filename",
dest="filename", metavar="FORMAT",
@@ -217,16 +195,6 @@ def build_parser():
help="Load external extractors from PATH",
)
general.add_argument(
- "--proxy",
- dest="proxy", metavar="URL", action=ConfigAction,
- help="Use the specified proxy",
- )
- general.add_argument(
- "--source-address",
- dest="source-address", metavar="IP", action=ConfigAction,
- help="Client-side IP address to bind to",
- )
- general.add_argument(
"--user-agent",
dest="user-agent", metavar="UA", action=ConfigAction,
help="User-Agent request header",
@@ -238,6 +206,31 @@ def build_parser():
"(ALL to delete everything)",
)
+ update = parser.add_argument_group("Update Options")
+    if util.EXECUTABLE:
+ update.add_argument(
+ "-U", "--update",
+ dest="update", action="store_const", const="latest",
+ help="Update to the latest version",
+ )
+ update.add_argument(
+ "--update-to",
+ dest="update", metavar="CHANNEL[@TAG]",
+ help=("Switch to a dfferent release channel (stable or dev) "
+ "or upgrade/downgrade to a specific version"),
+ )
+ update.add_argument(
+ "--update-check",
+ dest="update", action="store_const", const="check",
+ help="Check if a newer version is available",
+ )
+ else:
+ update.add_argument(
+ "-U", "--update-check",
+ dest="update", action="store_const", const="check",
+ help="Check if a newer version is available",
+ )
+
input = parser.add_argument_group("Input Options")
input.add_argument(
"urls",
@@ -263,6 +256,11 @@ def build_parser():
help=("Download URLs found in FILE. "
"Delete them after they were downloaded successfully."),
)
+ input.add_argument(
+ "--no-input",
+ dest="input", nargs=0, action=ConfigConstAction, const=False,
+ help=("Do not prompt for passwords/tokens"),
+ )
output = parser.add_argument_group("Output Options")
output.add_argument(
@@ -353,23 +351,45 @@ def build_parser():
help=("Do not emit ANSI color codes in output"),
)
- downloader = parser.add_argument_group("Downloader Options")
- downloader.add_argument(
- "-r", "--limit-rate",
- dest="rate", metavar="RATE", action=ConfigAction,
- help="Maximum download rate (e.g. 500k or 2.5M)",
- )
- downloader.add_argument(
+ networking = parser.add_argument_group("Networking Options")
+ networking.add_argument(
"-R", "--retries",
dest="retries", metavar="N", type=int, action=ConfigAction,
help=("Maximum number of retries for failed HTTP requests "
"or -1 for infinite retries (default: 4)"),
)
- downloader.add_argument(
+ networking.add_argument(
"--http-timeout",
dest="timeout", metavar="SECONDS", type=float, action=ConfigAction,
help="Timeout for HTTP connections (default: 30.0)",
)
+ networking.add_argument(
+ "--proxy",
+ dest="proxy", metavar="URL", action=ConfigAction,
+ help="Use the specified proxy",
+ )
+ networking.add_argument(
+ "--source-address",
+ dest="source-address", metavar="IP", action=ConfigAction,
+ help="Client-side IP address to bind to",
+ )
+ networking.add_argument(
+ "--no-check-certificate",
+ dest="verify", nargs=0, action=ConfigConstAction, const=False,
+ help="Disable HTTPS certificate validation",
+ )
+
+ downloader = parser.add_argument_group("Downloader Options")
+ downloader.add_argument(
+ "-r", "--limit-rate",
+ dest="rate", metavar="RATE", action=ConfigAction,
+ help="Maximum download rate (e.g. 500k or 2.5M)",
+ )
+ downloader.add_argument(
+ "--chunk-size",
+ dest="chunk-size", metavar="SIZE", action=ConfigAction,
+ help="Size of in-memory data chunks (default: 32k)",
+ )
downloader.add_argument(
"--sleep",
dest="sleep", metavar="SECONDS", action=ConfigAction,
@@ -390,21 +410,6 @@ def build_parser():
"for an input URL"),
)
downloader.add_argument(
- "--filesize-min",
- dest="filesize-min", metavar="SIZE", action=ConfigAction,
- help="Do not download files smaller than SIZE (e.g. 500k or 2.5M)",
- )
- downloader.add_argument(
- "--filesize-max",
- dest="filesize-max", metavar="SIZE", action=ConfigAction,
- help="Do not download files larger than SIZE (e.g. 500k or 2.5M)",
- )
- downloader.add_argument(
- "--chunk-size",
- dest="chunk-size", metavar="SIZE", action=ConfigAction,
- help="Size of in-memory data chunks (default: 32k)",
- )
- downloader.add_argument(
"--no-part",
dest="part", nargs=0, action=ConfigConstAction, const=False,
help="Do not use .part files",
@@ -425,16 +430,6 @@ def build_parser():
dest="download", nargs=0, action=ConfigConstAction, const=False,
help=("Do not download any files")
)
- downloader.add_argument(
- "--no-postprocessors",
- dest="postprocess", nargs=0, action=ConfigConstAction, const=False,
- help=("Do not run any post processors")
- )
- downloader.add_argument(
- "--no-check-certificate",
- dest="verify", nargs=0, action=ConfigConstAction, const=False,
- help="Disable HTTPS certificate validation",
- )
configuration = parser.add_argument_group("Configuration Options")
configuration.add_argument(
@@ -461,10 +456,20 @@ def build_parser():
)
configuration.add_argument(
"--config-create",
- dest="config_init", action="store_true",
+ dest="config", action="store_const", const="init",
help="Create a basic configuration file",
)
configuration.add_argument(
+ "--config-status",
+ dest="config", action="store_const", const="status",
+ help="Show configuration file status",
+ )
+ configuration.add_argument(
+ "--config-open",
+ dest="config", action="store_const", const="open",
+ help="Open configuration file in external application",
+ )
+ configuration.add_argument(
"--config-ignore",
dest="config_load", action="store_false",
help="Do not read default configuration files",
@@ -516,12 +521,6 @@ def build_parser():
selection = parser.add_argument_group("Selection Options")
selection.add_argument(
- "--download-archive",
- dest="archive", metavar="FILE", action=ConfigAction,
- help=("Record all downloaded or skipped files in FILE and "
- "skip downloading any file already in it"),
- )
- selection.add_argument(
"-A", "--abort",
dest="abort", metavar="N", type=int,
help=("Stop current extractor run "
@@ -534,6 +533,22 @@ def build_parser():
"after N consecutive file downloads were skipped"),
)
selection.add_argument(
+ "--filesize-min",
+ dest="filesize-min", metavar="SIZE", action=ConfigAction,
+ help="Do not download files smaller than SIZE (e.g. 500k or 2.5M)",
+ )
+ selection.add_argument(
+ "--filesize-max",
+ dest="filesize-max", metavar="SIZE", action=ConfigAction,
+ help="Do not download files larger than SIZE (e.g. 500k or 2.5M)",
+ )
+ selection.add_argument(
+ "--download-archive",
+ dest="archive", metavar="FILE", action=ConfigAction,
+ help=("Record all downloaded or skipped files in FILE and "
+ "skip downloading any file already in it"),
+ )
+ selection.add_argument(
"--range",
dest="image-range", metavar="RANGE", action=ConfigAction,
help=("Index range(s) specifying which files to download. "
@@ -574,6 +589,11 @@ def build_parser():
help="Activate the specified post processor",
)
postprocessor.add_argument(
+ "--no-postprocessors",
+ dest="postprocess", nargs=0, action=ConfigConstAction, const=False,
+ help=("Do not run any post processors")
+ )
+ postprocessor.add_argument(
"-O", "--postprocessor-option",
dest="options_pp", metavar="KEY=VALUE",
action=PPParseAction, default={},
diff --git a/gallery_dl/output.py b/gallery_dl/output.py
index 3518545..bd5d959 100644
--- a/gallery_dl/output.py
+++ b/gallery_dl/output.py
@@ -105,6 +105,7 @@ class LoggerAdapterActions():
self.error = functools.partial(self.log, logging.ERROR)
def log(self, level, msg, *args, **kwargs):
+ msg = str(msg)
if args:
msg = msg % args
diff --git a/gallery_dl/postprocessor/ugoira.py b/gallery_dl/postprocessor/ugoira.py
index c63a3d9..9e60ce2 100644
--- a/gallery_dl/postprocessor/ugoira.py
+++ b/gallery_dl/postprocessor/ugoira.py
@@ -158,6 +158,7 @@ class UgoiraPP(PostProcessor):
except Exception as exc:
print()
self.log.error("%s: %s", exc.__class__.__name__, exc)
+ self.log.debug("", exc_info=True)
pathfmt.realpath = pathfmt.temppath
else:
if self.mtime:
@@ -266,8 +267,8 @@ class UgoiraPP(PostProcessor):
append("")
ffconcat = tempdir + "/ffconcat.txt"
- with open(ffconcat, "w") as file:
- file.write("\n".join(content))
+ with open(ffconcat, "w") as fp:
+ fp.write("\n".join(content))
return ffconcat
def _write_mkvmerge_timecodes(self, tempdir):
@@ -282,8 +283,8 @@ class UgoiraPP(PostProcessor):
append("")
timecodes = tempdir + "/timecodes.tc"
- with open(timecodes, "w") as file:
- file.write("\n".join(content))
+ with open(timecodes, "w") as fp:
+ fp.write("\n".join(content))
return timecodes
def calculate_framerate(self, frames):
diff --git a/gallery_dl/util.py b/gallery_dl/util.py
index 861ec7e..e76ddf3 100644
--- a/gallery_dl/util.py
+++ b/gallery_dl/util.py
@@ -106,12 +106,12 @@ def identity(x):
return x
-def true(_):
+def true(_, __=None):
"""Always returns True"""
return True
-def false(_):
+def false(_, __=None):
"""Always returns False"""
return False
@@ -540,11 +540,22 @@ class CustomNone():
def __bool__():
return False
+ __lt__ = true
+ __le__ = true
+ __eq__ = false
+ __ne__ = true
+ __gt__ = false
+ __ge__ = false
+
@staticmethod
def __len__():
return 0
@staticmethod
+ def __hash__():
+ return 0
+
+ @staticmethod
def __format__(_):
return "None"
diff --git a/gallery_dl/version.py b/gallery_dl/version.py
index 6557763..a8ff38e 100644
--- a/gallery_dl/version.py
+++ b/gallery_dl/version.py
@@ -6,5 +6,5 @@
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
-__version__ = "1.27.0"
+__version__ = "1.27.1"
__variant__ = None
diff --git a/setup.py b/setup.py
index ee66f5f..5f9dbc8 100644
--- a/setup.py
+++ b/setup.py
@@ -9,8 +9,8 @@ import warnings
def read(fname):
path = os.path.join(os.path.dirname(__file__), fname)
- with open(path, encoding="utf-8") as file:
- return file.read()
+ with open(path, encoding="utf-8") as fp:
+ return fp.read()
def check_file(fname):
diff --git a/test/test_config.py b/test/test_config.py
index 06780be..bbe288f 100644
--- a/test/test_config.py
+++ b/test/test_config.py
@@ -165,12 +165,12 @@ class TestConfig(unittest.TestCase):
def test_load(self):
with tempfile.TemporaryDirectory() as base:
path1 = os.path.join(base, "cfg1")
- with open(path1, "w") as file:
- file.write('{"a": 1, "b": {"a": 2, "c": "text"}}')
+ with open(path1, "w") as fp:
+ fp.write('{"a": 1, "b": {"a": 2, "c": "text"}}')
path2 = os.path.join(base, "cfg2")
- with open(path2, "w") as file:
- file.write('{"a": 7, "b": {"a": 8, "e": "foo"}}')
+ with open(path2, "w") as fp:
+ fp.write('{"a": 7, "b": {"a": 8, "e": "foo"}}')
config.clear()
config.load((path1,))
@@ -208,8 +208,8 @@ class TestConfigFiles(unittest.TestCase):
def _load(name):
path = os.path.join(ROOTDIR, "docs", name)
try:
- with open(path) as file:
- return util.json_loads(file.read())
+ with open(path) as fp:
+ return util.json_loads(fp.read())
except FileNotFoundError:
raise unittest.SkipTest(path + " not available")
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 208645d..60c83ff 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -28,14 +28,14 @@ class TestCookiejar(unittest.TestCase):
cls.path = tempfile.TemporaryDirectory()
cls.cookiefile = join(cls.path.name, "cookies.txt")
- with open(cls.cookiefile, "w") as file:
- file.write("""# HTTP Cookie File
+ with open(cls.cookiefile, "w") as fp:
+ fp.write("""# HTTP Cookie File
.example.org\tTRUE\t/\tFALSE\t253402210800\tNAME\tVALUE
""")
cls.invalid_cookiefile = join(cls.path.name, "invalid.txt")
- with open(cls.invalid_cookiefile, "w") as file:
- file.write("""# asd
+ with open(cls.invalid_cookiefile, "w") as fp:
+ fp.write("""# asd
.example.org\tTRUE/FALSE\t253402210800\tNAME\tVALUE
""")
diff --git a/test/test_downloader.py b/test/test_downloader.py
index 9f9fb3b..f88b2c0 100644
--- a/test/test_downloader.py
+++ b/test/test_downloader.py
@@ -136,8 +136,8 @@ class TestDownloaderBase(unittest.TestCase):
if content:
mode = "w" + ("b" if isinstance(content, bytes) else "")
- with pathfmt.open(mode) as file:
- file.write(content)
+ with pathfmt.open(mode) as fp:
+ fp.write(content)
return pathfmt
@@ -151,8 +151,8 @@ class TestDownloaderBase(unittest.TestCase):
# test content
mode = "r" + ("b" if isinstance(output, bytes) else "")
- with pathfmt.open(mode) as file:
- content = file.read()
+ with pathfmt.open(mode) as fp:
+ content = fp.read()
self.assertEqual(content, output)
# test filename extension
diff --git a/test/test_formatter.py b/test/test_formatter.py
index 73e958c..e00af85 100644
--- a/test/test_formatter.py
+++ b/test/test_formatter.py
@@ -267,6 +267,18 @@ class TestFormatter(unittest.TestCase):
"{a:Sort-reverse}", # starts with 'S', contains 'r'
"['w', 'r', 'o', 'l', 'h', 'd', 'O', 'L', 'L', 'E', ' ']")
+ def test_specifier_conversions(self):
+ self._run_test("{a:Cl}" , "hello world")
+ self._run_test("{h:CHC}" , "Foo & Bar")
+ self._run_test("{l:CSulc}", "A, b, c")
+
+ def test_specifier_limit(self):
+ self._run_test("{a:X20/ */}", "hElLo wOrLd")
+ self._run_test("{a:X10/ */}", "hElLo wO *")
+
+ with self.assertRaises(ValueError):
+ self._run_test("{a:Xfoo/ */}", "hello wo *")
+
def test_chain_special(self):
# multiple replacements
self._run_test("{a:Rh/C/RE/e/RL/l/}", "Cello wOrld")
diff --git a/test/test_results.py b/test/test_results.py
index aaa71ec..ab3668e 100644
--- a/test/test_results.py
+++ b/test/test_results.py
@@ -25,7 +25,6 @@ from test import results # noqa E402
# temporary issues, etc.
BROKEN = {
- "photobucket",
}
CONFIG = {
diff --git a/test/test_util.py b/test/test_util.py
index 83b44b7..35e7247 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -745,6 +745,7 @@ def hash(value):
self.assertEqual(repr(obj), repr(None))
self.assertEqual(format(obj), str(None))
self.assertEqual(format(obj, "%F"), str(None))
+
self.assertIs(obj.attr, obj)
self.assertIs(obj["key"], obj)
self.assertIs(obj(), obj)
@@ -752,6 +753,17 @@ def hash(value):
self.assertIs(obj(foo="bar"), obj)
self.assertEqual(util.json_dumps(obj), "null")
+ self.assertLess(obj, "foo")
+ self.assertLessEqual(obj, None)
+ self.assertFalse(obj == obj)
+ self.assertTrue(obj != obj)
+ self.assertGreater(123, obj)
+ self.assertGreaterEqual(1.23, obj)
+
+ mapping = {}
+ mapping[obj] = 123
+ self.assertIn(obj, mapping)
+
i = 0
for _ in obj:
i += 1