author	Unit 193 <unit193@unit193.net>	2025-08-16 07:00:40 -0400
committer	Unit 193 <unit193@unit193.net>	2025-08-16 07:00:40 -0400
commit	22e8d9823eb9fb802c926fb03a5fdccbea26f878 (patch)
tree	d399937a3bf139d386b8f5df2fc646b751c14719 /data
parent	0839cde5064bd6000162ee23b8445b99afe10068 (diff)
parent	3d18761f620a294ea6c5bff13c5994b93b29f3ed (diff)
Update upstream source from tag 'upstream/1.30.3'
Update to upstream version '1.30.3' with Debian dir cbd3490f51b0ee3f2e172965318cd079b856367d
Diffstat (limited to 'data')
-rw-r--r--	data/completion/_gallery-dl	4
-rw-r--r--	data/completion/gallery-dl.fish	4
-rw-r--r--	data/man/gallery-dl.1	8
-rw-r--r--	data/man/gallery-dl.conf.5	250
4 files changed, 205 insertions, 61 deletions
diff --git a/data/completion/_gallery-dl b/data/completion/_gallery-dl
index f0d654e..07cfcd9 100644
--- a/data/completion/_gallery-dl
+++ b/data/completion/_gallery-dl
@@ -72,8 +72,8 @@ _arguments -s -S \
{-C,--cookies}'[File to load additional cookies from]':'<file>':_files \
--cookies-export'[Export session cookies to FILE]':'<file>':_files \
--cookies-from-browser'[Name of the browser to load cookies from, with optional domain prefixed with '\''/'\'', keyring name prefixed with '\''+'\'', profile prefixed with '\'':'\'', and container prefixed with '\''::'\'' ('\''none'\'' for no container (default), '\''all'\'' for all containers)]':'<browser[/domain][+keyring][:profile][::container]>' \
-{-A,--abort}'[Stop current extractor run after N consecutive file downloads were skipped]':'<n>' \
-{-T,--terminate}'[Stop current and parent extractor runs after N consecutive file downloads were skipped]':'<n>' \
+{-A,--abort}'[Stop current extractor(s) after N consecutive file downloads were skipped. Specify a TARGET to set how many levels to ascend or to which subcategory to jump to. Examples: '\''-A 3'\'', '\''-A 3:2'\'', '\''-A 3:manga'\'']':'<n[:target]>' \
+{-T,--terminate}'[Stop current & parent extractors and proceed with the next input URL after N consecutive file downloads were skipped]':'<n>' \
--filesize-min'[Do not download files smaller than SIZE (e.g. 500k or 2.5M)]':'<size>' \
--filesize-max'[Do not download files larger than SIZE (e.g. 500k or 2.5M)]':'<size>' \
--download-archive'[Record successfully downloaded files in FILE and skip downloading any file already in it]':'<file>':_files \
diff --git a/data/completion/gallery-dl.fish b/data/completion/gallery-dl.fish
index 8eb427a..b7e4fe4 100644
--- a/data/completion/gallery-dl.fish
+++ b/data/completion/gallery-dl.fish
@@ -67,8 +67,8 @@ complete -c gallery-dl -l 'netrc' -d 'Enable .netrc authentication data'
complete -c gallery-dl -r -F -s 'C' -l 'cookies' -d 'File to load additional cookies from'
complete -c gallery-dl -r -F -l 'cookies-export' -d 'Export session cookies to FILE'
complete -c gallery-dl -x -l 'cookies-from-browser' -d 'Name of the browser to load cookies from, with optional domain prefixed with "/", keyring name prefixed with "+", profile prefixed with ":", and container prefixed with "::" ("none" for no container (default), "all" for all containers)'
-complete -c gallery-dl -x -s 'A' -l 'abort' -d 'Stop current extractor run after N consecutive file downloads were skipped'
-complete -c gallery-dl -x -s 'T' -l 'terminate' -d 'Stop current and parent extractor runs after N consecutive file downloads were skipped'
+complete -c gallery-dl -x -s 'A' -l 'abort' -d 'Stop current extractor(s) after N consecutive file downloads were skipped. Specify a TARGET to set how many levels to ascend or to which subcategory to jump to. Examples: "-A 3", "-A 3:2", "-A 3:manga"'
+complete -c gallery-dl -x -s 'T' -l 'terminate' -d 'Stop current & parent extractors and proceed with the next input URL after N consecutive file downloads were skipped'
complete -c gallery-dl -x -l 'filesize-min' -d 'Do not download files smaller than SIZE (e.g. 500k or 2.5M)'
complete -c gallery-dl -x -l 'filesize-max' -d 'Do not download files larger than SIZE (e.g. 500k or 2.5M)'
complete -c gallery-dl -r -F -l 'download-archive' -d 'Record successfully downloaded files in FILE and skip downloading any file already in it'
diff --git a/data/man/gallery-dl.1 b/data/man/gallery-dl.1
index 4979279..39b88a4 100644
--- a/data/man/gallery-dl.1
+++ b/data/man/gallery-dl.1
@@ -1,4 +1,4 @@
-.TH "GALLERY-DL" "1" "2025-07-27" "1.30.2" "gallery-dl Manual"
+.TH "GALLERY-DL" "1" "2025-08-15" "1.30.3" "gallery-dl Manual"
.\" disable hyphenation
.nh
@@ -218,11 +218,11 @@ Export session cookies to FILE
.B "\-\-cookies\-from\-browser" \f[I]BROWSER[/DOMAIN][+KEYRING][:PROFILE][::CONTAINER]\f[]
Name of the browser to load cookies from, with optional domain prefixed with '/', keyring name prefixed with '+', profile prefixed with ':', and container prefixed with '::' ('none' for no container (default), 'all' for all containers)
.TP
-.B "\-A, \-\-abort" \f[I]N\f[]
-Stop current extractor run after N consecutive file downloads were skipped
+.B "\-A, \-\-abort" \f[I]N[:TARGET]\f[]
+Stop current extractor(s) after N consecutive file downloads were skipped. Specify a TARGET to set how many levels to ascend or to which subcategory to jump to. Examples: '-A 3', '-A 3:2', '-A 3:manga'
.TP
.B "\-T, \-\-terminate" \f[I]N\f[]
-Stop current and parent extractor runs after N consecutive file downloads were skipped
+Stop current & parent extractors and proceed with the next input URL after N consecutive file downloads were skipped
.TP
.B "\-\-filesize\-min" \f[I]SIZE\f[]
Do not download files smaller than SIZE (e.g. 500k or 2.5M)
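As a quick illustration of the reworked N[:TARGET] syntax described above (URL stands for any input URL):

    $ gallery-dl -A 3 URL        # stop the current extractor after 3 consecutive skipped downloads
    $ gallery-dl -A 3:2 URL      # ...and ascend 2 levels in the extractor hierarchy
    $ gallery-dl -A 3:manga URL  # ...and ascend to the extractor with subcategory 'manga'
    $ gallery-dl -T 3 URL        # stop current & parent extractors, continue with the next input URL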
diff --git a/data/man/gallery-dl.conf.5 b/data/man/gallery-dl.conf.5
index 12eea08..d33a147 100644
--- a/data/man/gallery-dl.conf.5
+++ b/data/man/gallery-dl.conf.5
@@ -1,4 +1,4 @@
-.TH "GALLERY-DL.CONF" "5" "2025-07-27" "1.30.2" "gallery-dl Manual"
+.TH "GALLERY-DL.CONF" "5" "2025-08-15" "1.30.3" "gallery-dl Manual"
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
@@ -330,6 +330,22 @@ depending on the local operating system
* \f[I]"windows"\f[]: \f[I]". "\f[]
+.SS extractor.*.path-convert
+.IP "Type:" 6
+\f[I]Conversion(s)\f[]
+
+.IP "Example:" 4
+.br
+* "g"
+.br
+* "Wl"
+
+.IP "Description:" 4
+\f[I]Conversion(s)\f[] to apply to each path segment after
+\f[I]path-restrict\f[]
+replacements.
+
+
.SS extractor.*.path-extended
.IP "Type:" 6
\f[I]bool\f[]
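A minimal command-line sketch of the new path-convert option; a root-level -o key acts as a fallback for all extractors, and the same value can equally be set in gallery-dl.conf (URL is a placeholder):

    $ gallery-dl -o path-convert=Wl URL   # apply the "Wl" conversions (as in the examples above) to each path segment after path-restrict replacements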
@@ -371,36 +387,59 @@ A JSON \f[I]object\f[] mapping filename extensions to their replacements.
.IP "Default:" 9
\f[I]true\f[]
+.IP "Example:" 4
+.br
+* "abort:5"
+.br
+* "abort:5:2"
+.br
+* "abort:5:manga"
+.br
+* "terminate:3"
+
.IP "Description:" 4
Controls the behavior when downloading files that have been
downloaded before, i.e. a file with the same filename already
exists or its ID is in a \f[I]download archive\f[].
-.br
-* \f[I]true\f[]: Skip downloads
-.br
-* \f[I]false\f[]: Overwrite already existing files
+\f[I]true\f[]
+Skip downloads
+\f[I]false\f[]
+Overwrite already existing files
+\f[I]"abort"\f[]
+Stop the current extractor
+\f[I]"abort:N"\f[]
+Skip downloads and
+stop the current extractor after \f[I]N\f[] consecutive skips
+\f[I]"abort:N:L"\f[]
+Skip downloads and
.br
-* \f[I]"abort"\f[]: Stop the current extractor run
+stop the current extractor after \f[I]N\f[] consecutive skips
+Ascend \f[I]L\f[] levels in the extractor hierarchy
.br
-* \f[I]"abort:N"\f[]: Skip downloads and stop the current extractor run
-after \f[I]N\f[] consecutive skips
-
+\f[I]"abort:N:SC"\f[]
+Skip downloads and
.br
-* \f[I]"terminate"\f[]: Stop the current extractor run, including parent extractors
+stop the current extractor after \f[I]N\f[] consecutive skips
+Ascend to an extractor with subcategory \f[I]SC\f[] in the extractor hierarchy
.br
-* \f[I]"terminate:N"\f[]: Skip downloads and stop the current extractor run,
-including parent extractors, after \f[I]N\f[] consecutive skips
-.br
-* \f[I]"exit"\f[]: Exit the program altogether
-.br
-* \f[I]"exit:N"\f[]: Skip downloads and exit the program
+\f[I]"terminate"\f[]
+Stop the current extractor, including parent extractors
+\f[I]"terminate:N"\f[]
+Skip downloads and
+stop the current extractor, including parent extractors,
after \f[I]N\f[] consecutive skips
-.br
-* \f[I]"enumerate"\f[]: Add an enumeration index to the beginning of the
+\f[I]"exit"\f[]
+Exit the program altogether
+\f[I]"exit:N"\f[]
+Skip downloads and
+exit the program after \f[I]N\f[] consecutive skips
+
+\f[I]"enumerate"\f[]
+Add an enumeration index to the beginning of the
filename extension (\f[I]file.1.ext\f[], \f[I]file.2.ext\f[], etc.)
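A hedged command-line sketch of the extended values, assuming this is the extractor.*.skip option (its section header falls just above this hunk); the same strings can be used in gallery-dl.conf:

    $ gallery-dl -o skip=abort:5 URL         # skip, and stop the current extractor after 5 consecutive skips
    $ gallery-dl -o skip=abort:5:2 URL       # ...then ascend 2 levels in the extractor hierarchy
    $ gallery-dl -o skip=abort:5:manga URL   # ...then ascend to the extractor with subcategory 'manga'
    $ gallery-dl -o skip=terminate:3 URL     # stop current and parent extractors after 3 consecutive skips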
@@ -457,6 +496,7 @@ response before \f[I]retrying\f[] the request.
* \f[I]"0.5-1.5"\f[]
\f[I]ao3\f[],
\f[I]arcalive\f[],
+\f[I]booth\f[],
\f[I]civitai\f[],
\f[I][Danbooru]\f[],
\f[I][E621]\f[],
@@ -498,7 +538,6 @@ response before \f[I]retrying\f[] the request.
* \f[I]"3.0-6.0"\f[]
\f[I]bilibili\f[],
\f[I]exhentai\f[],
-\f[I]idolcomplex\f[],
\f[I][reactor]\f[],
\f[I]readcomiconline\f[]
.br
@@ -831,12 +870,23 @@ or a \f[I]list\f[] with IP and explicit port number as elements.
.br
* \f[I]"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:LATEST) Gecko/20100101 Firefox/LATEST"\f[]: otherwise
+.IP "Example:" 4
+.br
+* "curl/8.14.1"
+.br
+* "browser"
+.br
+* "@chrome"
+
.IP "Description:" 4
User-Agent header value used for HTTP requests.
Setting this value to \f[I]"browser"\f[] will try to automatically detect
and use the \f[I]User-Agent\f[] header of the system's default browser.
+Setting this value to \f[I]"@BROWSER"\f[], e.g. \f[I]"@chrome"\f[], will try to automatically detect
+and use the \f[I]User-Agent\f[] header of this installed browser.
+
.SS extractor.*.browser
.IP "Type:" 6
@@ -969,7 +1019,7 @@ to use these browser's default ciphers.
.IP "Default:" 9
.br
-* \f[I]false\f[]: \f[I]artstation\f[], \f[I]behance\f[]
+* \f[I]false\f[]: \f[I]artstation\f[], \f[I]behance\f[], \f[I]vsco\f[]
.br
* \f[I]true\f[]: otherwise
@@ -2062,6 +2112,24 @@ Possibly available formats are
* \f[I]tiny\f[] (144p)
+.SS extractor.booth.strategy
+.IP "Type:" 6
+\f[I]string\f[]
+
+.IP "Default:" 9
+\f[I]"webpage"\f[]
+
+.IP "Description:" 4
+Selects how to handle and extract file URLs.
+
+\f[I]"webpage"\f[]
+Retrieve the full HTML page
+and extract file URLs from it
+\f[I]"fallback"\f[]
+Use \f[I]fallback\f[] URLs
+to guess each file's correct filename extension
+
+
.SS extractor.bunkr.endpoint
.IP "Type:" 6
\f[I]string\f[]
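One way to try the new booth strategy option from the command line, assuming -o accepts dotted config paths (equivalently, set extractor.booth.strategy in gallery-dl.conf):

    $ gallery-dl -o extractor.booth.strategy=fallback URL   # guess filename extensions from fallback URLs instead of fetching the full HTML page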
@@ -2175,6 +2243,8 @@ Possible values are
* \f[I]"user-images"\f[]
.br
* \f[I]"user-videos"\f[]
+.br
+* \f[I]"user-collections"\f[]
It is possible to use \f[I]"all"\f[] instead of listing all values separately.
@@ -2293,6 +2363,26 @@ Use \f[I]+\f[] as first character to add the given options to the
\f[I]quality\f[] ones.
+.SS extractor.comick.lang
+.IP "Type:" 6
+.br
+* \f[I]string\f[]
+.br
+* \f[I]list\f[] of \f[I]strings\f[]
+
+.IP "Example:" 4
+.br
+* "en"
+.br
+* "fr,it,pl"
+.br
+* ["fr", "it", "pl"]
+
+.IP "Description:" 4
+\f[I]ISO 639-1\f[] language codes
+to filter chapters by.
+
+
.SS extractor.cyberdrop.domain
.IP "Type:" 6
\f[I]string\f[]
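A hedged example of filtering comick chapters by language, again via a dotted -o path (or extractor.comick.lang in gallery-dl.conf):

    $ gallery-dl -o extractor.comick.lang=en URL          # English chapters only
    $ gallery-dl -o extractor.comick.lang=fr,it,pl URL    # French, Italian, and Polish chapters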
@@ -3092,9 +3182,13 @@ when processing a user profile.
Supported values are
.br
+* \f[I]info\f[]
+.br
* \f[I]avatar\f[]
.br
* \f[I]photos\f[]
+.br
+* \f[I]albums\f[]
It is possible to use \f[I]"all"\f[] instead of listing all values separately.
@@ -3485,6 +3579,22 @@ to attempt to fetch the current value used by gofile.
Recursively download files from subfolders.
+.SS extractor.hentaifoundry.descriptions
+.IP "Type:" 6
+\f[I]string\f[]
+
+.IP "Default:" 9
+\f[I]"text"\f[]
+
+.IP "Description:" 4
+Controls the format of \f[I]description\f[] metadata fields.
+
+.br
+* \f[I]"text"\f[]: Plain text with HTML tags removed
+.br
+* \f[I]"html"\f[]: Raw HTML content
+
+
.SS extractor.hentaifoundry.include
.IP "Type:" 6
.br
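For example, to keep the raw HTML of hentaifoundry description fields (dotted -o path, or extractor.hentaifoundry.descriptions in gallery-dl.conf):

    $ gallery-dl -o extractor.hentaifoundry.descriptions=html URL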
@@ -3937,29 +4047,11 @@ Extract a user's announcements as \f[I]announcements\f[] metadata.
.IP "Description:" 4
API endpoint to use for retrieving creator posts.
-\f[I]"legacy"\f[]
-Use the results from
-.br
-\f[I]/v1/{service}/user/{creator_id}/posts-legacy\f[]
-Provides less metadata, but is more reliable at returning all posts.
-.br
-Supports filtering results by \f[I]tag\f[] query parameter.
-.br
-\f[I]"legacy+"\f[]
-Use the results from
-.br
-\f[I]/v1/{service}/user/{creator_id}/posts-legacy\f[]
-to retrieve post IDs
-and one request to
-.br
-\f[I]/v1/{service}/user/{creator_id}/post/{post_id}\f[]
-to get a full set of metadata for each.
-\f[I]"posts"\f[]
-Use the results from
-.br
-\f[I]/v1/{service}/user/{creator_id}\f[]
-Provides more metadata, but might not return a creator's first/last posts.
-.br
+\f[I]"posts"\f[] \f[I] \f[I]"legacy"\f[]
+Provides only limited metadata.
+\f[I]"posts+"\f[] \f[] \f[I]"legacy+"\f[]
+Provides full metadata,
+but requires an additional API request for each post.
.SS extractor.kemono.favorites
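A sketch of switching between the two endpoint families; the config key is not shown in this hunk and extractor.kemono.endpoint is assumed here:

    $ gallery-dl -o extractor.kemono.endpoint=legacy URL   # assumed key name; limited metadata
    $ gallery-dl -o extractor.kemono.endpoint=posts+ URL   # assumed key name; full metadata, one extra API request per post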
@@ -5497,6 +5589,40 @@ Download animated images as \f[I].gif\f[] instead of \f[I].webp\f[]
Download article images.
+.SS extractor.skeb.include
+.IP "Type:" 6
+.br
+* \f[I]string\f[]
+.br
+* \f[I]list\f[] of \f[I]strings\f[]
+
+.IP "Default:" 9
+.br
+* \f[I]["works", "sentrequests"]\f[]
+if \f[I]sent-requests\f[] are enabled
+.br
+* \f[I]["works"]\f[] otherwise
+
+.IP "Example:" 4
+.br
+* "works,sentrequests"
+.br
+* ["works", "sentrequests"]
+
+.IP "Description:" 4
+A (comma-separated) list of subcategories to include
+when processing a user profile.
+
+Possible values are
+
+.br
+* \f[I]"works"\f[]
+.br
+* \f[I]"sentrequests"\f[]
+
+It is possible to use \f[I]"all"\f[] instead of listing all values separately.
+
+
.SS extractor.skeb.sent-requests
.IP "Type:" 6
\f[I]bool\f[]
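For instance, to process both subcategories of a skeb user profile (dotted -o path, or extractor.skeb.include in gallery-dl.conf):

    $ gallery-dl -o extractor.skeb.include=works,sentrequests URL
    $ gallery-dl -o extractor.skeb.include=all URL   # shorthand for all values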
@@ -9589,9 +9715,9 @@ section of your account's preferences
.br
* select "installed app"
.br
-* set \f[I]http://localhost:6414/\f[] as "redirect uri"
+* set "redirect uri" to http://localhost:6414/
.br
-* solve the "I'm not a robot" reCAPTCHA if needed
+* solve the "I'm not a robot" challenge if needed
.br
* click "create app"
@@ -9620,11 +9746,21 @@ new \f[I]client-id\f[] (\f[I]gallery-dl oauth:reddit\f[])
.br
* login and \f[I]Apply for an API Key\f[]
.br
-* use a random name and description,
-set "Type" to "Application", "Platform" to "All",
-and "Use" to "Non-Commercial"
+* fill out the form:
+
+.br
+* choose a random name and description
+.br
+* set "Type" to "Application"
+.br
+* set "Platform" to "All"
+.br
+* set "Use" to "Non-Commercial"
+.br
+* tick the two checkboxes at the bottom
.br
-* fill out the two checkboxes at the bottom and click "Apply"
+* click "Apply"
+
.br
* copy \f[I]API Key\f[] and \f[I]API Secret\f[]
and put them in your configuration file
@@ -9642,11 +9778,19 @@ as \f[I]"api-key"\f[] and \f[I]"api-secret"\f[]
.br
* click "Register application"
.br
-* fill out the form: use a random name and description, set
-https://example.org/ as "Application Website" and "Default
-callback URL"
+* fill out the form:
+
+.br
+* choose a random name and description
.br
-* solve Google's "I'm not a robot" challenge and click "Register"
+* set "Application Website" to https://example.org/
+.br
+* set "Default callback URL" to https://example.org/
+.br
+* solve the "I'm not a robot" challenge
+.br
+* click "Register"
+
.br
* click "Show secret key" (below "OAuth Consumer Key")
.br