diff options
Diffstat (limited to 'data/completion/_gallery-dl')
| -rw-r--r-- | data/completion/_gallery-dl | 10 |
1 file changed, 6 insertions, 4 deletions
diff --git a/data/completion/_gallery-dl b/data/completion/_gallery-dl
index 436260b..15806e8 100644
--- a/data/completion/_gallery-dl
+++ b/data/completion/_gallery-dl
@@ -11,7 +11,7 @@ _arguments -C -S \
 {-i,--input-file}'[Download URLs found in FILE ("-" for stdin). More than one --input-file can be specified]':'<file>':_files \
 --cookies'[File to load additional cookies from]':'<file>':_files \
 --proxy'[Use the specified proxy]':'<url>' \
---clear-cache'[Delete all cached login sessions, cookies, etc.]':'<module>' \
+--clear-cache'[Delete cached login sessions, cookies, etc. for MODULE (ALL to delete everything)]':'<module>' \
 {-q,--quiet}'[Activate quiet mode]' \
 {-v,--verbose}'[Print various debugging information]' \
 {-g,--get-urls}'[Print URLs instead of downloading]' \
@@ -27,7 +27,6 @@ _arguments -C -S \
 --write-pages'[Write downloaded intermediary pages to files in the current directory to debug problems]' \
 {-r,--limit-rate}'[Maximum download rate (e.g. 500k or 2.5M)]':'<rate>' \
 {-R,--retries}'[Maximum number of retries for failed HTTP requests or -1 for infinite retries (default: 4)]':'<n>' \
-{-A,--abort}'[Abort extractor run after N consecutive file downloads have been skipped, e.g. if files with the same filename already exist]':'<n>' \
 --http-timeout'[Timeout for HTTP connections (default: 30.0)]':'<seconds>' \
 --sleep'[Number of seconds to sleep before each download]':'<seconds>' \
 --filesize-min'[Do not download files smaller than SIZE (e.g. 500k or 2.5M)]':'<size>' \
@@ -44,7 +43,9 @@ _arguments -C -S \
 {-u,--username}'[Username to login with]':'<user>' \
 {-p,--password}'[Password belonging to the given username]':'<pass>' \
 --netrc'[Enable .netrc authentication data]' \
---download-archive'[Record all downloaded files in the archive file and skip downloading any file already in it.]':'<file>':_files \
+--download-archive'[Record all downloaded files in the archive file and skip downloading any file already in it]':'<file>':_files \
+{-A,--abort}'[Stop current extractor run after N consecutive file downloads were skipped]':'<n>' \
+{-T,--terminate}'[Stop current and parent extractor runs after N consecutive file downloads were skipped]':'<n>' \
 --range'[Index-range(s) specifying which images to download. For example "5-10" or "1,3-5,10-"]':'<range>' \
 --chapter-range'[Like "--range", but applies to manga-chapters and other delegated URLs]':'<range>' \
 --filter'[Python expression controlling which images to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by "-K". Example: --filter "image_width >= 1000 and rating in ("s", "q")"]':'<expr>' \
@@ -56,6 +57,7 @@ _arguments -C -S \
 --write-tags'[Write image tags to separate text files]' \
 --mtime-from-date'[Set file modification times according to "date" metadata]' \
 --exec'[Execute CMD for each downloaded file. Example: --exec "convert {} {}.png && rm {}"]':'<cmd>' \
---exec-after'[Execute CMD after all files were downloaded successfully. Example: --exec-after "cd {} && convert * ../doc.pdf"]':'<cmd>' && rc=0
+--exec-after'[Execute CMD after all files were downloaded successfully. Example: --exec-after "cd {} && convert * ../doc.pdf"]':'<cmd>' \
+{-P,--postprocessor}'[Activate the specified post processor]':'<name>' && rc=0
 return rc
