From a24ec1647aeac35a63b744ea856011ad6e06be3b Mon Sep 17 00:00:00 2001 From: Unit 193 Date: Sat, 20 Dec 2025 05:49:04 -0500 Subject: New upstream version 1.31.1. --- CHANGELOG.md | 60 ++- PKG-INFO | 14 +- README.rst | 11 +- data/completion/_gallery-dl | 6 +- data/completion/gallery-dl | 2 +- data/completion/gallery-dl.fish | 6 +- data/man/gallery-dl.1 | 12 +- data/man/gallery-dl.conf.5 | 642 ++++++++++++++++++++++++---- docs/gallery-dl.conf | 98 ++++- gallery_dl.egg-info/PKG-INFO | 14 +- gallery_dl.egg-info/SOURCES.txt | 19 +- gallery_dl/__init__.py | 4 +- gallery_dl/actions.py | 5 + gallery_dl/cookies.py | 2 +- gallery_dl/downloader/__init__.py | 2 +- gallery_dl/downloader/common.py | 11 +- gallery_dl/downloader/http.py | 18 +- gallery_dl/downloader/ytdl.py | 295 ++++++++----- gallery_dl/dt.py | 115 +++++ gallery_dl/extractor/2ch.py | 18 +- gallery_dl/extractor/2chan.py | 2 +- gallery_dl/extractor/2chen.py | 73 ++-- gallery_dl/extractor/35photo.py | 2 +- gallery_dl/extractor/4archive.py | 11 +- gallery_dl/extractor/4chan.py | 2 +- gallery_dl/extractor/4chanarchives.py | 2 +- gallery_dl/extractor/500px.py | 12 +- gallery_dl/extractor/8chan.py | 13 +- gallery_dl/extractor/8muses.py | 5 +- gallery_dl/extractor/__init__.py | 18 +- gallery_dl/extractor/adultempire.py | 2 +- gallery_dl/extractor/agnph.py | 10 +- gallery_dl/extractor/ao3.py | 38 +- gallery_dl/extractor/arcalive.py | 13 +- gallery_dl/extractor/arena.py | 89 ++++ gallery_dl/extractor/artstation.py | 7 +- gallery_dl/extractor/aryion.py | 117 ++++- gallery_dl/extractor/audiochan.py | 158 +++++++ gallery_dl/extractor/batoto.py | 17 +- gallery_dl/extractor/bbc.py | 7 +- gallery_dl/extractor/behance.py | 4 +- gallery_dl/extractor/bellazon.py | 47 +- gallery_dl/extractor/bilibili.py | 2 +- gallery_dl/extractor/blogger.py | 20 +- gallery_dl/extractor/bluesky.py | 45 +- gallery_dl/extractor/booru.py | 2 +- gallery_dl/extractor/boosty.py | 30 +- gallery_dl/extractor/booth.py | 5 +- gallery_dl/extractor/bunkr.py | 37 +- gallery_dl/extractor/catbox.py | 4 +- gallery_dl/extractor/cfake.py | 149 +++++++ gallery_dl/extractor/chevereto.py | 73 ++-- gallery_dl/extractor/cien.py | 14 +- gallery_dl/extractor/civitai.py | 138 +++--- gallery_dl/extractor/comedywildlifephoto.py | 51 +++ gallery_dl/extractor/comick.py | 12 +- gallery_dl/extractor/comicvine.py | 2 +- gallery_dl/extractor/common.py | 47 +- gallery_dl/extractor/cyberdrop.py | 20 +- gallery_dl/extractor/cyberfile.py | 58 ++- gallery_dl/extractor/danbooru.py | 58 ++- gallery_dl/extractor/dankefuerslesen.py | 6 +- gallery_dl/extractor/desktopography.py | 8 +- gallery_dl/extractor/deviantart.py | 50 +-- gallery_dl/extractor/directlink.py | 2 +- gallery_dl/extractor/discord.py | 18 +- gallery_dl/extractor/dynastyscans.py | 22 +- gallery_dl/extractor/e621.py | 40 +- gallery_dl/extractor/eporner.py | 54 +++ gallery_dl/extractor/erome.py | 16 +- gallery_dl/extractor/everia.py | 20 +- gallery_dl/extractor/exhentai.py | 22 +- gallery_dl/extractor/facebook.py | 49 ++- gallery_dl/extractor/fanbox.py | 63 +-- gallery_dl/extractor/fansly.py | 26 +- gallery_dl/extractor/fantia.py | 4 +- gallery_dl/extractor/fapachi.py | 2 +- gallery_dl/extractor/fapello.py | 16 +- gallery_dl/extractor/fikfap.py | 105 +++++ gallery_dl/extractor/fitnakedgirls.py | 208 +++++++++ gallery_dl/extractor/flickr.py | 43 +- gallery_dl/extractor/foolfuuka.py | 10 +- gallery_dl/extractor/foolslide.py | 6 +- gallery_dl/extractor/furaffinity.py | 24 +- gallery_dl/extractor/furry34.py | 9 +- gallery_dl/extractor/gelbooru.py | 18 +- 
gallery_dl/extractor/gelbooru_v01.py | 9 +- gallery_dl/extractor/gelbooru_v02.py | 14 +- gallery_dl/extractor/generic.py | 10 +- gallery_dl/extractor/girlsreleased.py | 10 +- gallery_dl/extractor/girlswithmuscle.py | 15 +- gallery_dl/extractor/gofile.py | 14 +- gallery_dl/extractor/hatenablog.py | 20 +- gallery_dl/extractor/hentai2read.py | 2 +- gallery_dl/extractor/hentaicosplays.py | 2 +- gallery_dl/extractor/hentaifoundry.py | 37 +- gallery_dl/extractor/hentaihand.py | 3 +- gallery_dl/extractor/hentaihere.py | 2 +- gallery_dl/extractor/hiperdex.py | 10 +- gallery_dl/extractor/hitomi.py | 2 +- gallery_dl/extractor/hotleak.py | 10 +- gallery_dl/extractor/idolcomplex.py | 9 +- gallery_dl/extractor/imagebam.py | 14 +- gallery_dl/extractor/imagechest.py | 10 +- gallery_dl/extractor/imagefap.py | 20 +- gallery_dl/extractor/imagehosts.py | 101 +++-- gallery_dl/extractor/imgbb.py | 5 +- gallery_dl/extractor/imgbox.py | 11 +- gallery_dl/extractor/imgpile.py | 2 +- gallery_dl/extractor/imgth.py | 2 +- gallery_dl/extractor/imgur.py | 29 +- gallery_dl/extractor/imhentai.py | 9 +- gallery_dl/extractor/inkbunny.py | 24 +- gallery_dl/extractor/instagram.py | 88 ++-- gallery_dl/extractor/issuu.py | 6 +- gallery_dl/extractor/itaku.py | 64 +-- gallery_dl/extractor/itchio.py | 2 +- gallery_dl/extractor/iwara.py | 15 +- gallery_dl/extractor/jschan.py | 6 +- gallery_dl/extractor/kabeuchi.py | 5 +- gallery_dl/extractor/keenspot.py | 2 +- gallery_dl/extractor/kemono.py | 102 ++--- gallery_dl/extractor/khinsider.py | 2 +- gallery_dl/extractor/komikcast.py | 10 +- gallery_dl/extractor/koofr.py | 55 +++ gallery_dl/extractor/leakgallery.py | 10 +- gallery_dl/extractor/lensdump.py | 9 +- gallery_dl/extractor/lexica.py | 2 +- gallery_dl/extractor/lightroom.py | 2 +- gallery_dl/extractor/livedoor.py | 4 +- gallery_dl/extractor/lofter.py | 4 +- gallery_dl/extractor/lolisafe.py | 4 +- gallery_dl/extractor/luscious.py | 6 +- gallery_dl/extractor/lynxchan.py | 6 +- gallery_dl/extractor/madokami.py | 5 +- gallery_dl/extractor/mangadex.py | 22 +- gallery_dl/extractor/mangafox.py | 8 +- gallery_dl/extractor/mangahere.py | 4 +- gallery_dl/extractor/manganelo.py | 16 +- gallery_dl/extractor/mangapark.py | 12 +- gallery_dl/extractor/mangaread.py | 4 +- gallery_dl/extractor/mangataro.py | 6 +- gallery_dl/extractor/mangoxo.py | 4 +- gallery_dl/extractor/mastodon.py | 25 +- gallery_dl/extractor/message.py | 9 +- gallery_dl/extractor/misskey.py | 80 ++-- gallery_dl/extractor/moebooru.py | 23 +- gallery_dl/extractor/motherless.py | 43 +- gallery_dl/extractor/myhentaigallery.py | 43 +- gallery_dl/extractor/myportfolio.py | 2 +- gallery_dl/extractor/naverblog.py | 11 +- gallery_dl/extractor/naverchzzk.py | 12 +- gallery_dl/extractor/naverwebtoon.py | 4 +- gallery_dl/extractor/nekohouse.py | 12 +- gallery_dl/extractor/newgrounds.py | 37 +- gallery_dl/extractor/nijie.py | 31 +- gallery_dl/extractor/nitter.py | 20 +- gallery_dl/extractor/noop.py | 6 +- gallery_dl/extractor/nozomi.py | 9 +- gallery_dl/extractor/nudostar.py | 6 +- gallery_dl/extractor/oauth.py | 32 +- gallery_dl/extractor/okporn.py | 39 ++ gallery_dl/extractor/paheal.py | 7 +- gallery_dl/extractor/patreon.py | 62 +-- gallery_dl/extractor/pexels.py | 13 +- gallery_dl/extractor/philomena.py | 9 +- gallery_dl/extractor/photovogue.py | 7 +- gallery_dl/extractor/picarto.py | 5 +- gallery_dl/extractor/picazor.py | 59 +++ gallery_dl/extractor/pictoa.py | 6 +- gallery_dl/extractor/piczel.py | 13 +- gallery_dl/extractor/pillowfort.py | 16 +- gallery_dl/extractor/pinterest.py | 22 
+- gallery_dl/extractor/pixeldrain.py | 24 +- gallery_dl/extractor/pixiv.py | 81 ++-- gallery_dl/extractor/pixnet.py | 12 +- gallery_dl/extractor/plurk.py | 13 +- gallery_dl/extractor/poipiku.py | 2 +- gallery_dl/extractor/poringa.py | 8 +- gallery_dl/extractor/pornhub.py | 17 +- gallery_dl/extractor/pornpics.py | 38 +- gallery_dl/extractor/pornstarstube.py | 43 ++ gallery_dl/extractor/postmill.py | 28 +- gallery_dl/extractor/rawkuma.py | 63 +-- gallery_dl/extractor/reactor.py | 14 +- gallery_dl/extractor/readcomiconline.py | 4 +- gallery_dl/extractor/realbooru.py | 39 +- gallery_dl/extractor/recursive.py | 4 +- gallery_dl/extractor/redbust.py | 186 -------- gallery_dl/extractor/reddit.py | 36 +- gallery_dl/extractor/redgifs.py | 4 +- gallery_dl/extractor/rule34us.py | 6 +- gallery_dl/extractor/rule34vault.py | 9 +- gallery_dl/extractor/rule34xyz.py | 9 +- gallery_dl/extractor/s3ndpics.py | 8 +- gallery_dl/extractor/saint.py | 8 +- gallery_dl/extractor/sankaku.py | 22 +- gallery_dl/extractor/sankakucomplex.py | 10 +- gallery_dl/extractor/schalenetwork.py | 25 +- gallery_dl/extractor/scrolller.py | 8 +- gallery_dl/extractor/seiga.py | 4 +- gallery_dl/extractor/sexcom.py | 54 ++- gallery_dl/extractor/shimmie2.py | 18 +- gallery_dl/extractor/shopify.py | 6 +- gallery_dl/extractor/simpcity.py | 186 -------- gallery_dl/extractor/simplyhentai.py | 8 +- gallery_dl/extractor/sizebooru.py | 4 +- gallery_dl/extractor/skeb.py | 18 +- gallery_dl/extractor/slickpic.py | 6 +- gallery_dl/extractor/slideshare.py | 5 +- gallery_dl/extractor/smugmug.py | 8 +- gallery_dl/extractor/soundgasm.py | 6 +- gallery_dl/extractor/speakerdeck.py | 4 +- gallery_dl/extractor/steamgriddb.py | 12 +- gallery_dl/extractor/subscribestar.py | 28 +- gallery_dl/extractor/sxypix.py | 39 ++ gallery_dl/extractor/szurubooru.py | 7 +- gallery_dl/extractor/tapas.py | 10 +- gallery_dl/extractor/tcbscans.py | 4 +- gallery_dl/extractor/telegraph.py | 5 +- gallery_dl/extractor/tenor.py | 17 +- gallery_dl/extractor/thehentaiworld.py | 7 +- gallery_dl/extractor/tiktok.py | 47 +- gallery_dl/extractor/tmohentai.py | 2 +- gallery_dl/extractor/toyhouse.py | 6 +- gallery_dl/extractor/tsumino.py | 2 +- gallery_dl/extractor/tumblr.py | 37 +- gallery_dl/extractor/tumblrgallery.py | 6 +- gallery_dl/extractor/tungsten.py | 4 +- gallery_dl/extractor/twibooru.py | 13 +- gallery_dl/extractor/twitter.py | 252 +++++++---- gallery_dl/extractor/unsplash.py | 14 +- gallery_dl/extractor/uploadir.py | 2 +- gallery_dl/extractor/urlgalleries.py | 4 +- gallery_dl/extractor/urlshortener.py | 2 +- gallery_dl/extractor/vanillarock.py | 6 +- gallery_dl/extractor/vichan.py | 6 +- gallery_dl/extractor/vipergirls.py | 10 +- gallery_dl/extractor/vk.py | 16 +- gallery_dl/extractor/vsco.py | 24 +- gallery_dl/extractor/wallhaven.py | 5 +- gallery_dl/extractor/wallpapercave.py | 6 +- gallery_dl/extractor/warosu.py | 4 +- gallery_dl/extractor/weasyl.py | 29 +- gallery_dl/extractor/webmshare.py | 4 +- gallery_dl/extractor/webtoons.py | 10 +- gallery_dl/extractor/weebcentral.py | 8 +- gallery_dl/extractor/weebdex.py | 132 ++++++ gallery_dl/extractor/weibo.py | 12 +- gallery_dl/extractor/wikiart.py | 10 +- gallery_dl/extractor/wikifeet.py | 4 +- gallery_dl/extractor/wikimedia.py | 98 +++-- gallery_dl/extractor/xasiat.py | 25 +- gallery_dl/extractor/xenforo.py | 348 +++++++++++++++ gallery_dl/extractor/xfolio.py | 8 +- gallery_dl/extractor/xhamster.py | 8 +- gallery_dl/extractor/xvideos.py | 4 +- gallery_dl/extractor/yiffverse.py | 9 +- gallery_dl/extractor/ytdl.py | 2 +- 
gallery_dl/extractor/zerochan.py | 8 +- gallery_dl/formatter.py | 86 ++-- gallery_dl/job.py | 171 ++++---- gallery_dl/option.py | 18 +- gallery_dl/output.py | 56 ++- gallery_dl/path.py | 95 ++-- gallery_dl/postprocessor/__init__.py | 2 +- gallery_dl/postprocessor/exec.py | 9 +- gallery_dl/postprocessor/metadata.py | 10 +- gallery_dl/postprocessor/mtime.py | 7 +- gallery_dl/postprocessor/ugoira.py | 6 +- gallery_dl/text.py | 65 +-- gallery_dl/update.py | 2 +- gallery_dl/util.py | 74 +--- gallery_dl/version.py | 2 +- gallery_dl/ytdl.py | 6 +- setup.py | 1 + test/test_downloader.py | 15 +- test/test_dt.py | 167 ++++++++ test/test_extractor.py | 86 +++- test/test_formatter.py | 41 +- test/test_job.py | 3 +- test/test_path.py | 297 +++++++++++++ test/test_postprocessor.py | 86 +++- test/test_results.py | 14 +- test/test_text.py | 69 +-- test/test_util.py | 90 +--- 286 files changed, 6087 insertions(+), 2910 deletions(-) create mode 100644 gallery_dl/dt.py create mode 100644 gallery_dl/extractor/arena.py create mode 100644 gallery_dl/extractor/audiochan.py create mode 100644 gallery_dl/extractor/cfake.py create mode 100644 gallery_dl/extractor/comedywildlifephoto.py create mode 100644 gallery_dl/extractor/eporner.py create mode 100644 gallery_dl/extractor/fikfap.py create mode 100644 gallery_dl/extractor/fitnakedgirls.py create mode 100644 gallery_dl/extractor/koofr.py create mode 100644 gallery_dl/extractor/okporn.py create mode 100644 gallery_dl/extractor/picazor.py create mode 100644 gallery_dl/extractor/pornstarstube.py delete mode 100644 gallery_dl/extractor/redbust.py delete mode 100644 gallery_dl/extractor/simpcity.py create mode 100644 gallery_dl/extractor/sxypix.py create mode 100644 gallery_dl/extractor/weebdex.py create mode 100644 gallery_dl/extractor/xenforo.py create mode 100644 test/test_dt.py create mode 100644 test/test_path.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b2f503..ed0715f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,38 +1,32 @@ -## 1.30.10 - 2025-10-12 +## 1.31.1 - 2025-12-20 ### Extractors #### Additions -- [bluesky] add `bookmark` extractor ([#8370](https://github.com/mikf/gallery-dl/issues/8370)) -- [dandadan] add support ([#8381](https://github.com/mikf/gallery-dl/issues/8381)) +- [2chen] implement generic `2chen` board extractors + - support `https://schan.help/` ([#8680](https://github.com/mikf/gallery-dl/issues/8680)) +- [aryion] add `watch` extractor ([#8705](https://github.com/mikf/gallery-dl/issues/8705)) +- [comedywildlifephoto] add `gallery` extractor ([#8690](https://github.com/mikf/gallery-dl/issues/8690)) +- [koofr] add `shared` extractor ([#8700](https://github.com/mikf/gallery-dl/issues/8700)) +- [picazor] add `user` extractor ([#7083](https://github.com/mikf/gallery-dl/issues/7083) [#7504](https://github.com/mikf/gallery-dl/issues/7504) [#7795](https://github.com/mikf/gallery-dl/issues/7795) [#8717](https://github.com/mikf/gallery-dl/issues/8717)) +- [weebdex] add support ([#8722](https://github.com/mikf/gallery-dl/issues/8722)) +- [xenforo] support `allthefallen.moe/forum` ([#3249](https://github.com/mikf/gallery-dl/issues/3249) [#8268](https://github.com/mikf/gallery-dl/issues/8268)) #### Fixes -- [bellazon] fix video URL extraction ([#8392](https://github.com/mikf/gallery-dl/issues/8392)) -- [bluesky] handle exceptions during file extraction -- [civitai] prevent downloading random posts from deleted users ([#8299](https://github.com/mikf/gallery-dl/issues/8299)) -- [girlsreleased] update API endpoints 
([#8360](https://github.com/mikf/gallery-dl/issues/8360)) -- [instagram] restore `video_dash_manifest` downloads ([#8364](https://github.com/mikf/gallery-dl/issues/8364)) -- [kemono] prevent fatal exceptions when retrieving user profile data ([#8382](https://github.com/mikf/gallery-dl/issues/8382)) -- [mangadex] fix `RuntimeError` for titles without a `description` ([#8389](https://github.com/mikf/gallery-dl/issues/8389)) -- [naver-blog] fix video extraction ([#8385](https://github.com/mikf/gallery-dl/issues/8385)) -- [poipiku] fix original file downloads ([#8356](https://github.com/mikf/gallery-dl/issues/8356)) -- [weibo] fix retrieving followers-only content ([#6447](https://github.com/mikf/gallery-dl/issues/6447) [#7939](https://github.com/mikf/gallery-dl/issues/7939) [#8063](https://github.com/mikf/gallery-dl/issues/8063) [#8354](https://github.com/mikf/gallery-dl/issues/8354) [#8357](https://github.com/mikf/gallery-dl/issues/8357)) -- [weibo] use `page` parameter for `feed` results ([#7523](https://github.com/mikf/gallery-dl/issues/7523) [#8128](https://github.com/mikf/gallery-dl/issues/8128) [#8357](https://github.com/mikf/gallery-dl/issues/8357)) -- [wikimedia] fix name & extension of files without an extension ([#8344](https://github.com/mikf/gallery-dl/issues/8344)) -- [wikimedia] ignore missing files ([#8388](https://github.com/mikf/gallery-dl/issues/8388)) +- [aryion:favorite] fix extraction ([#8705](https://github.com/mikf/gallery-dl/issues/8705) [#8723](https://github.com/mikf/gallery-dl/issues/8723) [#8728](https://github.com/mikf/gallery-dl/issues/8728)) +- [aryion] fix `description` metadata +- [boosty] include `Authorization` header with file downloads ([#8704](https://github.com/mikf/gallery-dl/issues/8704)) +- [fanbox] make `_extract_post()` non-fatal ([#8711](https://github.com/mikf/gallery-dl/issues/8711)) +- [furaffinity] fix `tags` metadata ([#8724](https://github.com/mikf/gallery-dl/issues/8724)) +- [mastodon] fix `AttributeError: 'parse_datetime_iso'` ([#8709](https://github.com/mikf/gallery-dl/issues/8709)) +- [tenor] fix `title` metadata +- [twitter] fix `avatar` & `background` downloads with `"expand": true` ([#8698](https://github.com/mikf/gallery-dl/issues/8698)) #### Improvements -- [bellazon] ignore links to other threads ([#8392](https://github.com/mikf/gallery-dl/issues/8392)) -- [common] disable delay for `request_location()` -- [fansly] update format selection ([#4401](https://github.com/mikf/gallery-dl/issues/4401)) -- [fansly] download user posts from all account walls ([#4401](https://github.com/mikf/gallery-dl/issues/4401)) -- [instagram] support `/share/SHORTCODE` URLs ([#8340](https://github.com/mikf/gallery-dl/issues/8340)) -- [weibo] ignore ongoing live streams ([#8339](https://github.com/mikf/gallery-dl/issues/8339)) -- [zerochan] forward URL parameters to API requests ([#8377](https://github.com/mikf/gallery-dl/issues/8377)) -#### Metadata -- [instagram] extract `subscription` metadata ([#8349](https://github.com/mikf/gallery-dl/issues/8349)) -- [webtoons] fix `episode` metadata extraction ([#2591](https://github.com/mikf/gallery-dl/issues/2591)) -#### Removals -- [twitter] remove login support ([#4202](https://github.com/mikf/gallery-dl/issues/4202) [#6029](https://github.com/mikf/gallery-dl/issues/6029) [#6040](https://github.com/mikf/gallery-dl/issues/6040) [#8362](https://github.com/mikf/gallery-dl/issues/8362)) -### Post Processors -- [exec] support `{_temppath}` replacement fields ([#8329](https://github.com/mikf/gallery-dl/issues/8329)) 
+- [boosty] warn about expired `auth` cookie tokens ([#8704](https://github.com/mikf/gallery-dl/issues/8704)) +- [misskey] implement `order-posts` option ([#8516](https://github.com/mikf/gallery-dl/issues/8516)) +- [reddit] use `"videos": "dash"` by default ([#8657](https://github.com/mikf/gallery-dl/issues/8657)) +- [pixiv] warn about invalid `PHPSESSID` cookie ([#8689](https://github.com/mikf/gallery-dl/issues/8689)) +### Downloaders +- [ytdl] fix `UnboundLocalError: 'tries'` ([#8707](https://github.com/mikf/gallery-dl/issues/8707)) +- [ytdl] respect `--no-skip` ### Miscellaneous -- [formatter] improve error messages ([#8369](https://github.com/mikf/gallery-dl/issues/8369)) -- [path] implement conditional `base-directory` -- use `utf-8` encoding when opening files in text mode ([#8376](https://github.com/mikf/gallery-dl/issues/8376)) +- [path] implement dynamic length directories ([#1350](https://github.com/mikf/gallery-dl/issues/1350)) +- [formatter] add `I` format specifier - identity +- [tests] add `path` tests diff --git a/PKG-INFO b/PKG-INFO index 6a8f856..abf3e16 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.4 Name: gallery_dl -Version: 1.30.10 +Version: 1.31.1 Summary: Command-line program to download image galleries and collections from several image hosting sites Home-page: https://github.com/mikf/gallery-dl Download-URL: https://github.com/mikf/gallery-dl/releases/latest @@ -24,6 +24,7 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet :: WWW/HTTP @@ -141,9 +142,9 @@ Standalone Executable Prebuilt executable files with a Python interpreter and required Python packages included are available for -- `Windows `__ +- `Windows `__ (Requires `Microsoft Visual C++ Redistributable Package (x86) `__) -- `Linux `__ +- `Linux `__ Nightly Builds @@ -224,6 +225,13 @@ Pulling image from `GitHub Container Registry `__ +- `Windows `__ (Requires `Microsoft Visual C++ Redistributable Package (x86) `__) -- `Linux `__ +- `Linux `__ Nightly Builds @@ -162,6 +162,13 @@ Pulling image from `GitHub Container Registry ' \ --download-archive'[Record successfully downloaded files in FILE and skip downloading any file already in it]':'':_files \ --range'[Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. '\''5'\'', '\''8-20'\'', or '\''1:24:3'\'')]':'' \ ---chapter-range'[Like '\''--range'\'', but applies to manga chapters and other delegated URLs]':'' \ +--post-range'[Like '\''--range'\'', but for posts]':'' \ +--chapter-range'[Like '\''--range'\'', but for child extractors handling manga chapters, external URLs, etc.]':'' \ --filter'[Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by '\''-K'\''. 
Example: --filter "image_width >= 1000 and rating in ('\''s'\'', '\''q'\'')"]':'' \ ---chapter-filter'[Like '\''--filter'\'', but applies to manga chapters and other delegated URLs]':'' \ +--post-filter'[Like '\''--filter'\'', but for posts]':'' \ +--chapter-filter'[Like '\''--filter'\'', but for child extractors handling manga chapters, external URLs, etc.]':'' \ {-P,--postprocessor}'[Activate the specified post processor]':'' \ --no-postprocessors'[Do not run any post processors]' \ {-O,--postprocessor-option}'[Additional post processor options]':'' \ diff --git a/data/completion/gallery-dl b/data/completion/gallery-dl index ae4cb0f..1f3a33d 100644 --- a/data/completion/gallery-dl +++ b/data/completion/gallery-dl @@ -10,7 +10,7 @@ _gallery_dl() elif [[ "${prev}" =~ ^()$ ]]; then COMPREPLY=( $(compgen -d -- "${cur}") ) else - COMPREPLY=( $(compgen -W "--help --version --filename --destination --directory --extractors --user-agent --clear-cache --compat --update-check --input-file --input-file-comment --input-file-delete --no-input --quiet --warning --verbose --get-urls --resolve-urls --dump-json --resolve-json --simulate --extractor-info --list-keywords --error-file --print --Print --print-to-file --Print-to-file --list-modules --list-extractors --write-log --write-unsupported --write-pages --print-traffic --no-colors --retries --http-timeout --proxy --source-address --force-ipv4 --force-ipv6 --no-check-certificate --limit-rate --chunk-size --sleep --sleep-request --sleep-429 --sleep-extractor --no-part --no-skip --no-mtime --no-download --option --config --config-yaml --config-toml --config-create --config-status --config-open --config-ignore --ignore-config --username --password --netrc --cookies --cookies-export --cookies-from-browser --abort --terminate --filesize-min --filesize-max --download-archive --range --chapter-range --filter --chapter-filter --postprocessor --no-postprocessors --postprocessor-option --write-metadata --write-info-json --write-infojson --write-tags --zip --cbz --mtime --mtime-from-date --rename --rename-to --ugoira --ugoira-conv --ugoira-conv-lossless --ugoira-conv-copy --exec --exec-after" -- "${cur}") ) + COMPREPLY=( $(compgen -W "--help --version --filename --destination --directory --extractors --user-agent --clear-cache --compat --update-check --input-file --input-file-comment --input-file-delete --no-input --quiet --warning --verbose --get-urls --resolve-urls --dump-json --resolve-json --simulate --extractor-info --list-keywords --error-file --print --Print --print-to-file --Print-to-file --list-modules --list-extractors --write-log --write-unsupported --write-pages --print-traffic --no-colors --retries --http-timeout --proxy --source-address --force-ipv4 --force-ipv6 --no-check-certificate --limit-rate --chunk-size --sleep --sleep-request --sleep-429 --sleep-extractor --no-part --no-skip --no-mtime --no-download --option --config --config-yaml --config-toml --config-create --config-status --config-open --config-ignore --ignore-config --username --password --netrc --cookies --cookies-export --cookies-from-browser --abort --terminate --filesize-min --filesize-max --download-archive --range --post-range --chapter-range --filter --post-filter --chapter-filter --postprocessor --no-postprocessors --postprocessor-option --write-metadata --write-info-json --write-infojson --write-tags --zip --cbz --mtime --mtime-from-date --rename --rename-to --ugoira --ugoira-conv --ugoira-conv-lossless --ugoira-conv-copy --exec --exec-after" -- "${cur}") ) fi } diff --git 
a/data/completion/gallery-dl.fish b/data/completion/gallery-dl.fish index 84ff5b5..5a3d8aa 100644 --- a/data/completion/gallery-dl.fish +++ b/data/completion/gallery-dl.fish @@ -73,9 +73,11 @@ complete -c gallery-dl -x -l 'filesize-min' -d 'Do not download files smaller th complete -c gallery-dl -x -l 'filesize-max' -d 'Do not download files larger than SIZE (e.g. 500k or 2.5M)' complete -c gallery-dl -r -F -l 'download-archive' -d 'Record successfully downloaded files in FILE and skip downloading any file already in it' complete -c gallery-dl -x -l 'range' -d 'Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. "5", "8-20", or "1:24:3")' -complete -c gallery-dl -x -l 'chapter-range' -d 'Like "--range", but applies to manga chapters and other delegated URLs' +complete -c gallery-dl -x -l 'post-range' -d 'Like "--range", but for posts' +complete -c gallery-dl -x -l 'chapter-range' -d 'Like "--range", but for child extractors handling manga chapters, external URLs, etc.' complete -c gallery-dl -x -l 'filter' -d 'Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by "-K". Example: --filter "image_width >= 1000 and rating in ("s", "q")"' -complete -c gallery-dl -x -l 'chapter-filter' -d 'Like "--filter", but applies to manga chapters and other delegated URLs' +complete -c gallery-dl -x -l 'post-filter' -d 'Like "--filter", but for posts' +complete -c gallery-dl -x -l 'chapter-filter' -d 'Like "--filter", but for child extractors handling manga chapters, external URLs, etc.' complete -c gallery-dl -x -s 'P' -l 'postprocessor' -d 'Activate the specified post processor' complete -c gallery-dl -l 'no-postprocessors' -d 'Do not run any post processors' complete -c gallery-dl -x -s 'O' -l 'postprocessor-option' -d 'Additional post processor options' diff --git a/data/man/gallery-dl.1 b/data/man/gallery-dl.1 index 9751705..90a423a 100644 --- a/data/man/gallery-dl.1 +++ b/data/man/gallery-dl.1 @@ -1,4 +1,4 @@ -.TH "GALLERY-DL" "1" "2025-10-12" "1.30.10" "gallery-dl Manual" +.TH "GALLERY-DL" "1" "2025-12-20" "1.31.1" "gallery-dl Manual" .\" disable hyphenation .nh @@ -236,14 +236,20 @@ Record successfully downloaded files in FILE and skip downloading any file alrea .B "\-\-range" \f[I]RANGE\f[] Index range(s) specifying which files to download. These can be either a constant value, range, or slice (e.g. '5', '8-20', or '1:24:3') .TP +.B "\-\-post\-range" \f[I]RANGE\f[] +Like '--range', but for posts +.TP .B "\-\-chapter\-range" \f[I]RANGE\f[] -Like '--range', but applies to manga chapters and other delegated URLs +Like '--range', but for child extractors handling manga chapters, external URLs, etc. .TP .B "\-\-filter" \f[I]EXPR\f[] Python expression controlling which files to download. Files for which the expression evaluates to False are ignored. Available keys are the filename-specific ones listed by '-K'. Example: --filter "image_width >= 1000 and rating in ('s', 'q')" .TP +.B "\-\-post\-filter" \f[I]EXPR\f[] +Like '--filter', but for posts +.TP .B "\-\-chapter\-filter" \f[I]EXPR\f[] -Like '--filter', but applies to manga chapters and other delegated URLs +Like '--filter', but for child extractors handling manga chapters, external URLs, etc. 
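(For reference, a minimal config-file sketch of the new post-level options; the values are illustrative, borrowed from the range and filter examples in this man page, and the top-level "extractor" object is the usual gallery-dl configuration layout rather than part of this patch.)

.. code:: json

    {
        "extractor": {
            "post-range" : "8-20",
            "post-filter": "date >= datetime(2025, 5, 1)"
        }
    }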
.TP .B "\-P, \-\-postprocessor" \f[I]NAME\f[] Activate the specified post processor diff --git a/data/man/gallery-dl.conf.5 b/data/man/gallery-dl.conf.5 index 320963f..7729342 100644 --- a/data/man/gallery-dl.conf.5 +++ b/data/man/gallery-dl.conf.5 @@ -1,4 +1,4 @@ -.TH "GALLERY-DL.CONF" "5" "2025-10-12" "1.30.10" "gallery-dl Manual" +.TH "GALLERY-DL.CONF" "5" "2025-12-20" "1.31.1" "gallery-dl Manual" .\" disable hyphenation .nh .\" disable justification (adjust text to left margin only) @@ -222,11 +222,14 @@ Use an extractor's current target directory as * \f[I]string\f[] .IP "Default:" 9 +\f[I]true\f[] +\f[I][chevereto]\f[] | +\f[I][imagehost]\f[] \f[I]false\f[] +otherwise .IP "Description:" 4 -If \f[I]true\f[], overwrite any metadata provided by a child extractor -with its parent's. +Forward a parent's metadata to its child extractors. If this is a \f[I]string\f[], add a parent's metadata to its children's .br @@ -650,6 +653,8 @@ This is supported for .br * \f[I]nijie\f[] (\f[I]R\f[]) .br +* \f[I]nudostarforum\f[] +.br * \f[I]pillowfort\f[] .br * \f[I]rule34xyz\f[] @@ -660,6 +665,8 @@ This is supported for .br * \f[I]seiga\f[] .br +* \f[I]simpcity\f[] +.br * \f[I]subscribestar\f[] .br * \f[I]tapas\f[] @@ -945,8 +952,9 @@ and use the \f[I]User-Agent\f[] header of this installed browser. \f[I]"firefox"\f[] \f[I]artstation\f[] \f[I] \f[I]behance\f[] \f[] -\f[I]fanbox\f[] | -\f[I]twitter\f[] +\f[I]fanbox\f[] \f[I] +\f[I]twitter\f[] \f[] +\f[I]vsco\f[] \f[I]null\f[] otherwise @@ -1090,9 +1098,8 @@ to use these browser's default ciphers. .IP "Default:" 9 \f[I]false\f[] -\f[I]artstation\f[] \f[I] -\f[I]behance\f[] \f[] -\f[I]vsco\f[] +\f[I]artstation\f[] | +\f[I]behance\f[] \f[I]true\f[] otherwise @@ -1114,6 +1121,18 @@ and potentially bypass Cloudflare blocks. Additional name-value pairs to be added to each metadata dictionary. +.SS extractor.*.keywords-default +.IP "Type:" 6 +any + +.IP "Default:" 9 +\f[I]"None"\f[] + +.IP "Description:" 4 +Default value used for missing or undefined keyword names in a +\f[I]Format String\f[]. + + .SS extractor.*.keywords-eval .IP "Type:" 6 \f[I]bool\f[] @@ -1122,20 +1141,27 @@ Additional name-value pairs to be added to each metadata dictionary. \f[I]false\f[] .IP "Description:" 4 -Evaluate each \f[I]keywords\f[] \f[I]string\f[] value -as a \f[I]Format String\f[]. +Evaluate each +\f[I]keywords\f[] +and +\f[I]keywords-global\f[] +\f[I]string\f[] value as a \f[I]Format String\f[]. -.SS extractor.*.keywords-default +.SS extractor.*.keywords-global .IP "Type:" 6 -any +\f[I]object\f[] (name → value) -.IP "Default:" 9 -\f[I]"None"\f[] +.IP "Example:" 4 +{"type": "Original", "type_id": 1, "type_category": "meta"} .IP "Description:" 4 -Default value used for missing or undefined keyword names in a -\f[I]Format String\f[]. +Global name-value pairs to be added to each metadata dictionary. + +.IP "Note:" 4 +Keywords defined here will be overwritten by keywords from +\f[I]extractor.keywords\f[] +with the same name. .SS extractor.*.url-metadata @@ -1299,17 +1325,22 @@ may pose a security risk. .IP "Example:" 4 .br -* "file,skip" +* "after,skip" .br -* ["file", "skip"] +* ["after", "skip"] .IP "Description:" 4 \f[I]Event(s)\f[] for which IDs get written to an \f[I]archive\f[]. 
-Available events are: -\f[I]file\f[], \f[I]skip\f[] +.IP "Available Events:" 4 +.br +* \f[I]file\f[] +.br +* \f[I]after\f[] +.br +* \f[I]skip\f[] .SS extractor.*.archive-format @@ -1647,13 +1678,22 @@ For example \f[I]5-\f[], \f[I]5:\f[], and \f[I]5::\f[] all mean "Start at file n The index of the first file is \f[I]1\f[]. +.SS extractor.*.post-range +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Description:" 4 +Like \f[I]image-range\f[], +but for posts. + + .SS extractor.*.chapter-range .IP "Type:" 6 \f[I]string\f[] .IP "Description:" 4 Like \f[I]image-range\f[], -but applies to delegated URLs like manga chapters, etc. +but for child extractors handling manga chapters, external URLs, etc. .SS extractor.*.image-filter @@ -1677,6 +1717,24 @@ A file only gets downloaded when *all* of the given \f[I]Expressions\f[] evaluat Available values are the filename-specific ones listed by \f[I]-K\f[] or \f[I]-j\f[]. +.SS extractor.*.post-filter +.IP "Type:" 6 +.br +* \f[I]Condition\f[] +.br +* \f[I]list\f[] of \f[I]Conditions\f[] + +.IP "Example:" 4 +.br +* "post['id'] > 12345" +.br +* ["date >= datetime(2025, 5, 1)", "print(post_id)"] + +.IP "Description:" 4 +Like \f[I]image-filter\f[], +but for posts. + + .SS extractor.*.chapter-filter .IP "Type:" 6 .br @@ -1692,7 +1750,7 @@ Available values are the filename-specific ones listed by \f[I]-K\f[] or \f[I]-j .IP "Description:" 4 Like \f[I]image-filter\f[], -but applies to delegated URLs like manga chapters, etc. +but for child extractors handling manga chapters, external URLs, etc. .SS extractor.*.image-unique @@ -1724,7 +1782,7 @@ but applies to delegated URLs like manga chapters, etc. \f[I]string\f[] .IP "Default:" 9 -\f[I]"%Y-%m-%dT%H:%M:%S"\f[] +\f[I]null\f[] .IP "Description:" 4 Format string used to parse \f[I]string\f[] values of @@ -1732,6 +1790,16 @@ date-min and date-max. See \f[I]strptime\f[] for a list of formatting directives. +.IP "Special Values:" 4 +\f[I]null\f[] +Parse date-min and date-max according to +.br +\f[I]ISO 8601\f[] +See +.br +\f[I]datetime.fromisoformat()\f[] +for details and examples. + .IP "Note:" 4 Despite its name, this option does **not** control how \f[I]{date}\f[] metadata fields are formatted. @@ -2018,6 +2086,25 @@ https://developers.google.com/blogger/docs/3.0/using#APIKey Download embedded videos hosted on https://www.blogger.com/ +.SS extractor.bluesky.api-server +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"https://bsky.social"\f[] if a +.br +\f[I]username\f[] +is provided +\f[I]"https://api.bsky.app"\f[] otherwise +.br + +.IP "Description:" 4 +Server address for API requests. + +Can be used when self-hosting a +\f[I]PDS\f[] + + .SS extractor.bluesky.include .IP "Type:" 6 .br @@ -2403,15 +2490,26 @@ as well as the default \f[I]user-images\f[] and \f[I]user-videos\f[]: .IP "Example:" 4 .br -* "generation,post,version" +* "generation,tags,post,version" .br * ["version", "generation"] .IP "Description:" 4 -Extract additional \f[I]generation\f[], \f[I]version\f[], and \f[I]post\f[] metadata. +Extract additional metadata. + +.IP "Supported Values:" 4 +.br +* \f[I]generation\f[] +.br +* \f[I]post\f[] +.br +* \f[I]tags\f[] +.br +* \f[I]version\f[] .IP "Note:" 4 -This requires 1 or more additional API requests per image or video. +This requires 1 additional API request +for each selected value per image or video. 
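(A hedged config-file example combining two of the supported values listed above; per the note, selecting both costs two additional API requests per image or video.)

.. code:: json

    {
        "extractor": {
            "civitai": {
                "metadata": ["generation", "tags"]
            }
        }
    }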
.SS extractor.civitai.nsfw @@ -2444,6 +2542,60 @@ while \f[I]3\f[] (\f[I]1|2\f[]) would return only \f[I]None\f[] and \f[I]Soft\f[] rated images, +.SS extractor.civitai.period +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"AllTime"\f[] + +.IP "Description:" 4 +Sets the \f[I]period\f[] parameter +when paginating over results. + +.IP "Supported Values:" 4 +.br +* \f[I]"AllTime"\f[] +.br +* \f[I]"Year"\f[] +.br +* \f[I]"Month"\f[] +.br +* \f[I]"Week"\f[] +.br +* \f[I]"Day"\f[] + + +.SS extractor.civitai.sort +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"Newest"\f[] + +.IP "Description:" 4 +Sets the \f[I]sort\f[] parameter +when paginating over results. + +.IP "Supported Values:" 4 +.br +* \f[I]"Newest"\f[] +.br +* \f[I]"Oldest"\f[] +.br +* \f[I]"Most Reactions"\f[] +.br +* \f[I]"Most Comments"\f[] +.br +* \f[I]"Most Collected"\f[] + +.IP "Special Values:" 4 +\f[I]"asc"\f[] +Ascending order (\f[I]"Oldest"\f[]) +\f[I]"desc"\f[] | \f[I]"reverse"\f[] +Descending order (\f[I]"Newest"\f[]) + + .SS extractor.civitai.quality .IP "Type:" 6 .br @@ -2559,6 +2711,17 @@ to be interactively prompted for a password when needed (see \f[I]getpass()\f[]). +.SS extractor.cyberfile.recursive +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]true\f[] + +.IP "Description:" 4 +Recursively download files from subfolders. + + .SS extractor.[Danbooru].external .IP "Type:" 6 \f[I]bool\f[] @@ -3483,6 +3646,17 @@ Extract \f[I]plan\f[] and extended \f[I]user\f[] metadata. \f[I]fanbox.comments\f[] +.SS extractor.fanbox.creator.offset +.IP "Type:" 6 +\f[I]integer\f[] + +.IP "Default:" 9 +\f[I]0\f[] + +.IP "Description:" 4 +Custom \f[I]offset\f[] starting value when paginating over posts. + + .SS extractor.fansly.formats .IP "Type:" 6 \f[I]list\f[] of \f[I]integers\f[] @@ -3799,6 +3973,21 @@ to attempt to fetch the current value used by gofile. Recursively download files from subfolders. +.SS extractor.hdoujin.cbz +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Download each gallery as a single \f[I].cbz\f[] file. + +.IP "Note:" 4 +Requires a +\f[I]token\f[] + + .SS extractor.hdoujin.crt .IP "Type:" 6 \f[I]string\f[] @@ -3878,7 +4067,10 @@ for example \f[I]tags_artist\f[] or \f[I]tags_character\f[]. .IP "Description:" 4 \f[I]Authorization\f[] header value used for requests to \f[I]https://api.hdoujin.org\f[] -to access \f[I]favorite\f[] galleries. +to access \f[I]favorite\f[] galleries +or download +\f[I].cbz\f[] +archives. .SS extractor.hentaifoundry.descriptions @@ -4277,6 +4469,25 @@ when processing a user profile. It is possible to use \f[I]"all"\f[] instead of listing all values separately. +.SS extractor.itaku.order +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"desc"\f[] + +.IP "Description:" 4 +Controls the order in which +images/posts/users are returned. + +\f[I]"asc"\f[] | \f[I]"reverse"\f[] +Ascending order (oldest first) +\f[I]"desc"\f[] +Descending order (newest first) +any other \f[I]string\f[] +Custom result order + + .SS extractor.itaku.videos .IP "Type:" 6 \f[I]bool\f[] @@ -4605,6 +4816,17 @@ Additional query parameters to send when fetching manga chapters. and \f[I]/user/follows/manga/feed\f[]) +.SS extractor.mangadex.data-saver +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Enable Data Saver mode and download lower quality versions of chapters. + + .SS extractor.mangadex.lang .IP "Type:" 6 .br @@ -4764,6 +4986,17 @@ Also emit metadata for text-only posts without media content. 
Your access token, necessary to fetch favorited notes. +.SS extractor.[misskey].date-min & .date-max +.IP "Type:" 6 +\f[I]Date\f[] + +.IP "Default:" 9 +\f[I]null\f[] + +.IP "Description:" 4 +Retrieve only notes posted after/before this \f[I]Date\f[] + + .SS extractor.[misskey].include .IP "Type:" 6 .br @@ -4798,6 +5031,22 @@ when processing a user profile. It is possible to use \f[I]"all"\f[] instead of listing all values separately. +.SS extractor.[misskey].order-posts +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"desc"\f[] + +.IP "Description:" 4 +Controls the order in which posts are processed. + +\f[I]"asc"\f[] | \f[I]"reverse"\f[] +Ascending order (oldest first) +\f[I]"desc"\f[] +Descending order (newest first) + + .SS extractor.[misskey].renotes .IP "Type:" 6 \f[I]bool\f[] @@ -4820,6 +5069,17 @@ Fetch media from renoted notes. Fetch media from replies to other notes. +.SS extractor.[misskey].text-posts +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Also retrieve text-only notes without media content. + + .SS extractor.[moebooru].pool.metadata .IP "Type:" 6 \f[I]bool\f[] @@ -5153,6 +5413,36 @@ Selects the format of \f[I]images\f[] \f[I]files\f[]. * \f[I]thumbnail_small\f[] (\f[I]"h":100,"w":100\f[]) +.SS extractor.patreon.order-posts +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]collection\f[] +\f[I]"asc"\f[] +otherwise +\f[I]"desc"\f[] + +.IP "Example:" 4 +.br +* "-published_at" +.br +* "collection_order" + +.IP "Description:" 4 +Controls the order in which +posts are returned and processed. + +\f[I]"asc"\f[] +Ascending order (oldest first) +\f[I]"desc"\f[] +Descending order (newest first) +\f[I]"reverse"\f[] +Reverse order +any other \f[I]string\f[] +Custom \f[I]sort\f[] order + + .SS extractor.patreon.user.date-max .IP "Type:" 6 \f[I]Date\f[] @@ -5681,7 +5971,7 @@ or \f[I]"hq"\f[] if not present. \f[I]string\f[] .IP "Default:" 9 -\f[I]"oauth"\f[] +\f[I]"rest"\f[] .IP "Description:" 4 Selects which API endpoints to use. @@ -5859,7 +6149,7 @@ Follow links in the original post's \f[I]selftext\f[]. * \f[I]string\f[] .IP "Default:" 9 -\f[I]true\f[] +\f[I]"dash"\f[] .IP "Description:" 4 Control video download behavior. @@ -6029,6 +6319,21 @@ Download video embeds from external sites. Download videos. +.SS extractor.schalenetwork.cbz +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Download each gallery as a single \f[I].cbz\f[] file. + +.IP "Note:" 4 +Requires a +\f[I]token\f[] + + .SS extractor.schalenetwork.crt .IP "Type:" 6 \f[I]string\f[] @@ -6115,7 +6420,10 @@ for example \f[I]tags_artist\f[] or \f[I]tags_character\f[]. .IP "Description:" 4 \f[I]Authorization\f[] header value used for requests to \f[I]https://api.schale.network\f[] -to access \f[I]favorite\f[] galleries. +to access \f[I]favorite\f[] galleries +or download +\f[I].cbz\f[] +archives. .SS extractor.sexcom.gifs @@ -6129,23 +6437,6 @@ to access \f[I]favorite\f[] galleries. Download animated images as \f[I].gif\f[] instead of \f[I].webp\f[] -.SS extractor.simpcity.order-posts -.IP "Type:" 6 -\f[I]string\f[] - -.IP "Default:" 9 -\f[I]"desc"\f[] - -.IP "Description:" 4 -Controls the order in which -posts of a \f[I]thread\f[] are processed. 
- -\f[I]"asc"\f[] -Ascending order (oldest first) -\f[I]"desc"\f[] | \f[I]"reverse"\f[] -Descending order (newest first) - - .SS extractor.sizebooru.metadata .IP "Type:" 6 \f[I]bool\f[] @@ -6558,6 +6849,17 @@ Download audio tracks using \f[I]ytdl\f[] Ignore audio tracks +.SS extractor.tiktok.covers +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Download video covers. + + .SS extractor.tiktok.videos .IP "Type:" 6 \f[I]bool\f[] @@ -7091,6 +7393,27 @@ Known available sizes are * \f[I]360x360\f[] +.SS extractor.twitter.limit +.IP "Type:" 6 +.br +* \f[I]integer\f[] +.br +* \f[I]list\f[] of \f[I]integers\f[] + +.IP "Default:" 9 +\f[I]50\f[] + +.IP "Example:" 4 +[40, 30, 20, 10, 5] + +.IP "Description:" 4 +Number of requested results per API query. + +When given as a \f[I]list\f[], +start with the first element as \f[I]count\f[] parameter +and switch to the next element whenever no results are returned. + + .SS extractor.twitter.logout .IP "Type:" 6 \f[I]bool\f[] @@ -7183,6 +7506,19 @@ It is possible to exclude unwanted Tweets using \f[I]image-filter \f[]. +.SS extractor.twitter.retries-api +.IP "Type:" 6 +\f[I]integer\f[] + +.IP "Default:" 9 +\f[I]9\f[] + +.IP "Description:" 4 +Maximum number of retries +for API requests when encountering server \f[I]errors\f[], +or \f[I]-1\f[] for infinite retries. + + .SS extractor.twitter.retweets .IP "Type:" 6 \f[I]bool\f[] @@ -7199,14 +7535,26 @@ will be taken from the original Tweets, not the Retweets. .SS extractor.twitter.search-limit .IP "Type:" 6 -\f[I]integer\f[] +.br +* \f[I]integer\f[] +.br +* \f[I]list\f[] of \f[I]integers\f[] .IP "Default:" 9 \f[I]20\f[] +.IP "Example:" 4 +[50, 20, 10, 5, 2] + .IP "Description:" 4 Number of requested results per search query. +When given as a \f[I]list\f[], +start with the first element as \f[I]count\f[] parameter +and switch to the next element when +\f[I]search-stop\f[] +is reached. + .SS extractor.twitter.search-pagination .IP "Type:" 6 @@ -7225,15 +7573,31 @@ Update the \f[I]max_id\f[] search query parameter to the Tweet ID value of the last retrieved Tweet. -.SS extractor.twitter.search-stop +.SS extractor.twitter.search-results .IP "Type:" 6 -\f[I]integer\f[] +\f[I]string\f[] .IP "Default:" 9 +\f[I]"latest"\f[] + +.IP "Description:" 4 +Determines the target of search results. + +.IP "Supported Values:" 4 .br -* \f[I]3\f[] if \f[I]search-pagination\f[] is set to \f[I]"cursor"\f[] +* \f[I]"top"\f[] .br -* \f[I]0\f[] otherwise +* \f[I]"media"\f[] +.br +* \f[I]"latest"\f[] | \f[I]"live"\f[] + + +.SS extractor.twitter.search-stop +.IP "Type:" 6 +\f[I]integer\f[] + +.IP "Default:" 9 +\f[I]3\f[] .IP "Description:" 4 Number of empty search result batches @@ -7298,18 +7662,6 @@ Extract \f[I]TwitPic\f[] embeds. Ignore previously seen Tweets. -.SS extractor.twitter.username-alt -.IP "Type:" 6 -\f[I]string\f[] - -.IP "Description:" 4 -Alternate Identifier (username, email, phone number) -when \f[I]logging in\f[]. - -When not specified and asked for by Twitter, -this identifier will need to be entered in an interactive prompt. - - .SS extractor.twitter.users .IP "Type:" 6 \f[I]string\f[] @@ -7318,7 +7670,7 @@ this identifier will need to be entered in an interactive prompt. 
\f[I]"user"\f[] .IP "Example:" 4 -"https://twitter.com/search?q=from:{legacy[screen_name]}" +"https://twitter.com/search?q=from:{core[screen_name]}" .IP "Description:" 4 Basic format string for user URLs generated from @@ -7723,6 +8075,18 @@ If this value is \f[I]"original"\f[], metadata for these files will be taken from the original posts, not the retweeted posts. +.SS extractor.weibo.text +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]false\f[] + +.IP "Description:" 4 +Extract full \f[I]text\f[] & \f[I]text_raw\f[] metadata +for statuses with truncated \f[I]text\f[]. + + .SS extractor.weibo.videos .IP "Type:" 6 \f[I]bool\f[] @@ -7734,6 +8098,21 @@ will be taken from the original posts, not the retweeted posts. Download video files. +.SS extractor.wikimedia.format +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]fandom\f[] | \f[I]wikigg\f[] +\f[I]"original"\f[] +otherwise +\f[I]""\f[] + +.IP "Description:" 4 +Sets the format query parameter value +added to all download URLs. + + .SS extractor.wikimedia.image-revisions .IP "Type:" 6 \f[I]integer\f[] @@ -7777,6 +8156,23 @@ The value must be between 10 and 500. For \f[I]Category:\f[] pages, recursively descent into subcategories. +.SS extractor.[xenforo].order-posts +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]"desc"\f[] + +.IP "Description:" 4 +Controls the order in which +posts of a \f[I]thread\f[] are processed. + +\f[I]"asc"\f[] +Ascending order (oldest first) +\f[I]"desc"\f[] | \f[I]"reverse"\f[] +Descending order (newest first) + + .SS extractor.ytdl.cmdline-args .IP "Type:" 6 .br @@ -8148,17 +8544,35 @@ into the actual output files. .SS downloader.*.part-directory .IP "Type:" 6 -\f[I]Path\f[] +.br +* \f[I]Path\f[] +.br +* \f[I]object\f[] (\f[I]Condition\f[] → \f[I]Path\f[]) .IP "Default:" 9 \f[I]null\f[] +.IP "Example:" 4 +.. code:: json + +"/tmp/.gdl" + +.. code:: json + +{ +"size > 100000": "~/.gdl/part", +"duration" : "/tmp/.gdl/video", +} + + .IP "Description:" 4 -Alternate location for \f[I].part\f[] files. +Alternate location(s) for \f[I].part\f[] files. + +.IP "Note:" 4 +If this value is \f[I]null\f[] or no \f[I]Conditions\f[] apply, +\f[I].part\f[] files are stored alongside the actual output files. -Missing directories will be created as needed. -If this value is \f[I]null\f[], \f[I].part\f[] files are going to be stored -alongside the actual output files. +For a single \f[I]Path\f[], missing directories will be created as needed .SS downloader.*.progress @@ -8842,8 +9256,6 @@ File to write logging output to. .IP "Description:" 4 File to write external URLs unsupported by *gallery-dl* to. -The default \f[I]Format String\f[] here is \f[I]"{message}"\f[]. - .SS output.errorfile .IP "Type:" 6 @@ -8855,8 +9267,6 @@ The default \f[I]Format String\f[] here is \f[I]"{message}"\f[]. .IP "Description:" 4 File to write input URLs which returned an error to. -The default \f[I]Format String\f[] here is also \f[I]"{message}"\f[]. - When combined with \f[I]-I\f[]/\f[I]--input-file-comment\f[] or \f[I]-x\f[]/\f[I]--input-file-delete\f[], @@ -9093,6 +9503,18 @@ On POSIX systems, this means enabling the to have it call \f[I]setsid()\f[]. +.SS exec.verbose +.IP "Type:" 6 +\f[I]bool\f[] + +.IP "Default:" 9 +\f[I]true\f[] + +.IP "Description:" 4 +Include \f[I]command\f[] +arguments in logging messages. 
+ + .SS hash.chunk-size .IP "Type:" 6 \f[I]integer\f[] @@ -9200,9 +9622,14 @@ Write metadata using \f[I]json.dump()\f[] Write metadata in \f[I]JSON Lines\f[] format \f[I]"tags"\f[] Write \f[I]tags\f[] separated by newlines +\f[I]"print"\f[] +Write the result of applying +\f[I]content-format\f[] +to \f[I]stdout\f[] \f[I]"custom"\f[] -Write the result of applying \f[I]metadata.content-format\f[] -to a file's metadata dictionary +Write the result of applying +\f[I]content-format\f[] +to \f[I]a file\f[] \f[I]"modify"\f[] Add or modify metadata entries \f[I]"delete"\f[] @@ -9518,7 +9945,7 @@ Only applies to \f[I]"mode": "json"\f[] and \f[I]"jsonl"\f[]. .IP "Type:" 6 \f[I]string\f[] -.IP "Defsult:" 4 +.IP "Default:" 9 \f[I]"w"\f[] .IP "Description:" 4 @@ -9535,7 +9962,7 @@ See the \f[I]mode\f[] argument of \f[I]open()\f[] for further details. .IP "Type:" 6 \f[I]string\f[] -.IP "Defsult:" 4 +.IP "Default:" 9 \f[I]"utf-8"\f[] .IP "Description:" 4 @@ -9544,6 +9971,31 @@ Name of the encoding used to encode a file's content. See the \f[I]encoding\f[] argument of \f[I]open()\f[] for further details. +.SS metadata.newline +.IP "Type:" 6 +\f[I]string\f[] + +.IP "Default:" 9 +\f[I]null\f[] + +.IP "Description:" 4 +The newline sequence used in metadata files. + +If \f[I]null\f[], any \f[I]\\n\f[] characters +written are translated to the system default line separator. + +See the \f[I]newline\f[] argument of \f[I]open()\f[] for further details. + +.IP "Supported Values:" 4 +\f[I]null\f[] +Any \f[I]\\n\f[] characters +written are translated to the system default line separator. +\f[I]""\f[] \f[I] \f[I]"\\n"\f[] +Don't replace newline characters. +\f[I]"\\r"\f[] \f[] \f[I]"\\r\\n"\f[] +Replace newline characters with the given sequence. + + .SS metadata.private .IP "Type:" 6 \f[I]bool\f[] @@ -10710,7 +11162,8 @@ in a JSON file. "format" : "{asctime} {name}: {message}", "format-date": "%H:%M:%S", "path" : "~/log.txt", -"encoding" : "ascii" +"encoding" : "ascii", +"defer" : true } .. code:: json @@ -10744,7 +11197,12 @@ it is also possible to access the current and keywords objects and their attributes, for example \f[I]"{extractor.url}"\f[], \f[I]"{path.filename}"\f[], \f[I]"{keywords.title}"\f[] .br -* Default: \f[I]"[{name}][{levelname}] {message}"\f[] +* Default: +\f[I]"[{name}][{levelname}] {message}"\f[] for +\f[I]logfile\f[], +\f[I]"{message}"\f[] for +\f[I]unsupportedfile\f[] and +\f[I]errorfile\f[] .br * format-date .br @@ -10770,17 +11228,35 @@ and keywords objects and their attributes, for example use \f[I]"w"\f[] to truncate or \f[I]"a"\f[] to append (see \f[I]open()\f[]) .br -* Default: \f[I]"w"\f[] +* Default: +\f[I]"w"\f[] for +\f[I]logfile\f[] and +\f[I]unsupportedfile\f[], +\f[I]"a"\f[] for +\f[I]errorfile\f[] .br * encoding .br * File encoding .br * Default: \f[I]"utf-8"\f[] +.br +* defer +.br +* Defer file opening/creation until writing the first logging message +.br +* Default: +\f[I]false\f[] for +\f[I]logfile\f[], +\f[I]true\f[] for +\f[I]unsupportedfile\f[] and +\f[I]errorfile\f[] + .IP "Note:" 4 -path, mode, and encoding are only applied when configuring -logging output to a file. +path, mode, encoding, and defer +are only applied when configuring logging output to a file. 
+(See \f[I]logging.FileHandler\f[]) .SS Postprocessor Configuration diff --git a/docs/gallery-dl.conf b/docs/gallery-dl.conf index 5fdca47..25eea53 100644 --- a/docs/gallery-dl.conf +++ b/docs/gallery-dl.conf @@ -47,13 +47,16 @@ "image-filter" : null, "image-range" : null, "image-unique" : false, + "post-filter" : null, + "post-range" : null, "chapter-filter": null, "chapter-range" : null, "chapter-unique": false, "keywords" : {}, - "keywords-eval" : false, "keywords-default" : null, + "keywords-eval" : false, + "keywords-global" : {}, "parent-directory": false, "parent-metadata" : false, @@ -168,11 +171,12 @@ "username": "", "password": "", - "include" : ["media"], - "metadata": false, - "quoted" : false, - "reposts" : false, - "videos" : true, + "api-server": null, + "include" : ["media"], + "metadata" : false, + "quoted" : false, + "reposts" : false, + "videos" : true, "likes": { "depth" : 0, @@ -215,6 +219,8 @@ "include" : ["user-images", "user-videos"], "metadata": false, "nsfw" : true, + "period" : "AllTime", + "sort" : "Newest", "quality" : "original=true", "quality-videos": "quality=100" }, @@ -244,7 +250,8 @@ }, "cyberfile": { - "password": "" + "password" : "", + "recursive": true }, "dankefuerslesen": { @@ -335,7 +342,11 @@ "comments": false, "embeds" : true, "fee-max" : null, - "metadata": false + "metadata": false, + + "creator": { + "offset": 0 + } }, "fansly": { @@ -397,6 +408,7 @@ "token": "", "sleep-request": "0.5-1.5", + "cbz" : false, "format": ["0", "1600", "1280", "980", "780"], "tags" : false }, @@ -466,6 +478,7 @@ { "sleep-request": "0.5-1.5", "include": "gallery", + "order" : "desc", "videos" : true }, "iwara": @@ -520,6 +533,7 @@ "api-server": "https://api.mangadex.org", "api-parameters": null, + "data-saver": false, "lang": null, "ratings": ["safe", "suggestive", "erotica", "pornographic"] }, @@ -576,7 +590,11 @@ "cursor" : true, "files" : ["images", "image_large", "attachments", "postfile", "content"], "format-images": "download_url", + "order-posts" : "desc", + "collection": { + "order-posts": "asc" + }, "user": { "date-max" : 0 } @@ -658,24 +676,25 @@ }, "reddit": { + "cookies" : null, "client-id" : null, "user-agent" : null, "refresh-token": null, - "api" : "oauth", + "api" : "rest", "comments" : 0, "morecomments": false, "embeds" : true, "date-min" : 0, "date-max" : 253402210800, - "date-format" : "%Y-%m-%dT%H:%M:%S", + "date-format" : null, "id-min" : null, "id-max" : null, "limit" : null, "previews" : true, "recursion" : 0, "selftext" : null, - "videos" : true + "videos" : "dash" }, "redgifs": { @@ -713,6 +732,7 @@ "token": "", "sleep-request": "0.5-1.5", + "cbz" : false, "format": ["0", "1600", "1280", "980", "780"], "tags" : false }, @@ -726,12 +746,6 @@ { "gifs": true }, - "simpcity": - { - "cookies": null, - - "order-posts": "desc" - }, "sizebooru": { "sleep-request": "0.5-1.5", @@ -803,6 +817,7 @@ { "audio" : true, "videos": true, + "covers": false, "user": { "avatar": true, @@ -841,7 +856,6 @@ "twitter": { "username" : "", - "username-alt": "", "password" : "", "cookies" : null, @@ -852,16 +866,19 @@ "cursor" : true, "expand" : false, "include" : ["timeline"], + "limit" : 50, "locked" : "abort", "logout" : true, "pinned" : false, "quoted" : false, "ratelimit" : "wait", "replies" : true, + "retries-api" : 9, "retweets" : false, "search-limit": 20, "search-pagination": "cursor", - "search-stop" : "auto", + "search-results" : "latest", + "search-stop" : 3, "size" : ["orig", "4096x4096", "large", "medium", "small"], "text-tweets" : false, 
"tweet-endpoint": "auto", @@ -941,6 +958,7 @@ "livephoto": true, "movies" : false, "retweets" : false, + "text" : false, "videos" : true }, "xfolio": @@ -981,6 +999,11 @@ "videos" : true }, + "chevereto": + { + "parent-metadata": true + }, + "Danbooru": { "sleep-request": "0.5-1.5", @@ -1050,6 +1073,11 @@ "referer": false }, + "imagehost": + { + "parent-metadata": true + }, + "mastodon": { "access-token": null, @@ -1062,9 +1090,13 @@ "misskey": { "access-token": null, + "date-min" : null, + "date-max" : null, "include" : ["notes"], + "order-posts" : "desc", "renotes" : false, - "replies" : true + "replies" : true, + "text-posts" : false }, "Nijie": @@ -1122,10 +1154,36 @@ "wikimedia": { "sleep-request": "1.0-2.0", + "format": "", "image-revisions": 1, "limit": 50, "subcategories": true }, + "fandom": + { + "format": "original" + }, + "wikigg": + { + "format": "original" + }, + + "xenforo": + { + "order-posts": "desc" + }, + "nudostarforum": + { + "username": "", + "password": "", + "cookies" : null + }, + "simpcity": + { + "username": "", + "password": "", + "cookies" : null + }, "booru": { diff --git a/gallery_dl.egg-info/PKG-INFO b/gallery_dl.egg-info/PKG-INFO index 6a8f856..abf3e16 100644 --- a/gallery_dl.egg-info/PKG-INFO +++ b/gallery_dl.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.4 Name: gallery_dl -Version: 1.30.10 +Version: 1.31.1 Summary: Command-line program to download image galleries and collections from several image hosting sites Home-page: https://github.com/mikf/gallery-dl Download-URL: https://github.com/mikf/gallery-dl/releases/latest @@ -24,6 +24,7 @@ Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Classifier: Programming Language :: Python :: 3.12 Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Classifier: Topic :: Internet :: WWW/HTTP @@ -141,9 +142,9 @@ Standalone Executable Prebuilt executable files with a Python interpreter and required Python packages included are available for -- `Windows `__ +- `Windows `__ (Requires `Microsoft Visual C++ Redistributable Package (x86) `__) -- `Linux `__ +- `Linux `__ Nightly Builds @@ -224,6 +225,13 @@ Pulling image from `GitHub Container Registry = 0 else float("inf"), + "retries": self.retries+1 if self.retries >= 0 else float("inf"), "socket_timeout": self.config("timeout", extractor._timeout), "nocheckcertificate": not self.config("verify", extractor._verify), "proxy": self.proxies.get("http") if self.proxies else None, @@ -39,17 +39,25 @@ class YoutubeDLDownloader(DownloaderBase): def download(self, url, pathfmt): kwdict = pathfmt.kwdict + tries = 0 - ytdl_instance = kwdict.pop("_ytdl_instance", None) - if not ytdl_instance: + if ytdl_instance := kwdict.pop("_ytdl_instance", None): + # 'ytdl' extractor + self._prepare(ytdl_instance) + info_dict = kwdict.pop("_ytdl_info_dict") + else: + # other extractors ytdl_instance = self.ytdl_instance if not ytdl_instance: try: module = ytdl.import_module(self.config("module")) except (ImportError, SyntaxError) as exc: - self.log.error("Cannot import module '%s'", - getattr(exc, "name", "")) - self.log.debug("", exc_info=exc) + if exc.__context__: + self.log.error("Cannot import yt-dlp or youtube-dl") + else: + self.log.error("Cannot import module '%s'", + getattr(exc, "name", "")) + self.log.traceback(exc) self.download = lambda u, 
p: False return False @@ -63,6 +71,8 @@ class YoutubeDLDownloader(DownloaderBase): module, self, self.ytdl_opts) if self.outtmpl == "default": self.outtmpl = module.DEFAULT_OUTTMPL + self._prepare(ytdl_instance) + if self.forward_cookies: self.log.debug("Forwarding cookies to %s", ytdl_instance.__module__) @@ -70,45 +80,150 @@ class YoutubeDLDownloader(DownloaderBase): for cookie in self.session.cookies: set_cookie(cookie) - if "__gdl_initialize" in ytdl_instance.params: - del ytdl_instance.params["__gdl_initialize"] + url = url[5:] + manifest = kwdict.get("_ytdl_manifest") + while True: + tries += 1 + self.error = None + try: + if manifest is None: + info_dict = self._extract_url( + ytdl_instance, url) + else: + info_dict = self._extract_manifest( + ytdl_instance, url, kwdict) + except Exception as exc: + self.log.traceback(exc) + cls = exc.__class__ + if cls.__module__ == "builtins": + tries = False + msg = f"{cls.__name__}: {exc}" + else: + if self.error is not None: + msg = self.error + elif not info_dict: + msg = "Empty 'info_dict' data" + else: + break + + if tries: + self.log.error("%s (%s/%s)", msg, tries, self.retries+1) + else: + self.log.error(msg) + return False + if tries > self.retries: + return False - if self.progress is not None: - ytdl_instance.add_progress_hook(self._progress_hook) - if rlf := ytdl_instance.params.pop("__gdl_ratelimit_func", False): - self.rate_dyn = rlf + if extra := kwdict.get("_ytdl_extra"): + info_dict.update(extra) - info_dict = kwdict.pop("_ytdl_info_dict", None) - if not info_dict: - url = url[5:] + while True: + tries += 1 + self.error = None try: - if manifest := kwdict.pop("_ytdl_manifest", None): - info_dict = self._extract_manifest( - ytdl_instance, url, manifest, - kwdict.pop("_ytdl_manifest_data", None), - kwdict.pop("_ytdl_manifest_headers", None), - kwdict.pop("_ytdl_manifest_cookies", None)) + if "entries" in info_dict: + success = self._download_playlist( + ytdl_instance, pathfmt, info_dict) else: - info_dict = self._extract_info(ytdl_instance, url) + success = self._download_video( + ytdl_instance, pathfmt, info_dict) except Exception as exc: - self.log.debug("", exc_info=exc) - self.log.warning("%s: %s", exc.__class__.__name__, exc) + self.log.traceback(exc) + cls = exc.__class__ + if cls.__module__ == "builtins": + tries = False + msg = f"{cls.__name__}: {exc}" + else: + if self.error is not None: + msg = self.error + elif not success: + msg = "Error" + else: + break - if not info_dict: + if tries: + self.log.error("%s (%s/%s)", msg, tries, self.retries+1) + else: + self.log.error(msg) return False + if tries > self.retries: + return False + return True + + def _extract_url(self, ytdl, url): + return ytdl.extract_info(url, download=False) + + def _extract_manifest(self, ytdl, url, kwdict): + extr = ytdl.get_info_extractor("Generic") + video_id = extr._generic_id(url) + + if cookies := kwdict.get("_ytdl_manifest_cookies"): + if isinstance(cookies, dict): + cookies = cookies.items() + set_cookie = ytdl.cookiejar.set_cookie + for name, value in cookies: + set_cookie(Cookie( + 0, name, value, None, False, + "", False, False, "/", False, + False, None, False, None, None, {}, + )) + + type = kwdict["_ytdl_manifest"] + data = kwdict.get("_ytdl_manifest_data") + headers = kwdict.get("_ytdl_manifest_headers") + if type == "hls": + if data is None: + try: + fmts, subs = extr._extract_m3u8_formats_and_subtitles( + url, video_id, "mp4", headers=headers) + except AttributeError: + fmts = extr._extract_m3u8_formats( + url, video_id, "mp4", 
headers=headers) + subs = None + else: + try: + fmts, subs = extr._parse_m3u8_formats_and_subtitles( + data, url, "mp4", headers=headers) + except AttributeError: + fmts = extr._parse_m3u8_formats( + data, url, "mp4", headers=headers) + subs = None - if "entries" in info_dict: - index = kwdict.get("_ytdl_index") - if index is None: - return self._download_playlist( - ytdl_instance, pathfmt, info_dict) + elif type == "dash": + if data is None: + try: + fmts, subs = extr._extract_mpd_formats_and_subtitles( + url, video_id, headers=headers) + except AttributeError: + fmts = extr._extract_mpd_formats( + url, video_id, headers=headers) + subs = None else: - info_dict = info_dict["entries"][index] + if isinstance(data, str): + data = ElementTree.fromstring(data) + try: + fmts, subs = extr._parse_mpd_formats_and_subtitles( + data, mpd_id="dash") + except AttributeError: + fmts = extr._parse_mpd_formats( + data, mpd_id="dash") + subs = None - if extra := kwdict.get("_ytdl_extra"): - info_dict.update(extra) + else: + raise ValueError(f"Unsupported manifest type '{type}'") - return self._download_video(ytdl_instance, pathfmt, info_dict) + if headers: + for fmt in fmts: + fmt["http_headers"] = headers + + info_dict = { + "extractor": "", + "id" : video_id, + "title" : video_id, + "formats" : fmts, + "subtitles": subs, + } + return ytdl.process_ie_result(info_dict, download=False) def _download_video(self, ytdl_instance, pathfmt, info_dict): if "url" in info_dict: @@ -161,12 +276,7 @@ class YoutubeDLDownloader(DownloaderBase): path = pathfmt.realpath.replace("%", "%%") self._set_outtmpl(ytdl_instance, path) - try: - ytdl_instance.process_info(info_dict) - except Exception as exc: - self.log.debug("", exc_info=exc) - return False - + ytdl_instance.process_info(info_dict) pathfmt.temppath = info_dict.get("filepath") or info_dict["_filename"] return True @@ -188,78 +298,20 @@ class YoutubeDLDownloader(DownloaderBase): ytdl_instance.process_info(entry) status = True except Exception as exc: - self.log.debug("", exc_info=exc) + self.log.traceback(exc) self.log.error("%s: %s", exc.__class__.__name__, exc) return status - def _extract_info(self, ytdl, url): - return ytdl.extract_info(url, download=False) - - def _extract_manifest(self, ytdl, url, manifest_type, manifest_data=None, - headers=None, cookies=None): - extr = ytdl.get_info_extractor("Generic") - video_id = extr._generic_id(url) - - if cookies is not None: - if isinstance(cookies, dict): - cookies = cookies.items() - set_cookie = ytdl.cookiejar.set_cookie - for name, value in cookies: - set_cookie(Cookie( - 0, name, value, None, False, - "", False, False, "/", False, - False, None, False, None, None, {}, - )) + def _prepare(self, ytdl_instance): + if "__gdl_initialize" not in ytdl_instance.params: + return - if manifest_type == "hls": - if manifest_data is None: - try: - fmts, subs = extr._extract_m3u8_formats_and_subtitles( - url, video_id, "mp4", headers=headers) - except AttributeError: - fmts = extr._extract_m3u8_formats( - url, video_id, "mp4", headers=headers) - subs = None - else: - try: - fmts, subs = extr._parse_m3u8_formats_and_subtitles( - url, video_id, "mp4") - except AttributeError: - fmts = extr._parse_m3u8_formats(url, video_id, "mp4") - subs = None - - elif manifest_type == "dash": - if manifest_data is None: - try: - fmts, subs = extr._extract_mpd_formats_and_subtitles( - url, video_id, headers=headers) - except AttributeError: - fmts = extr._extract_mpd_formats( - url, video_id, headers=headers) - subs = None - else: - if 
isinstance(manifest_data, str): - manifest_data = ElementTree.fromstring(manifest_data) - try: - fmts, subs = extr._parse_mpd_formats_and_subtitles( - manifest_data, mpd_id="dash") - except AttributeError: - fmts = extr._parse_mpd_formats( - manifest_data, mpd_id="dash") - subs = None - - else: - self.log.error("Unsupported manifest type '%s'", manifest_type) - return None - - info_dict = { - "extractor": "", - "id" : video_id, - "title" : video_id, - "formats" : fmts, - "subtitles": subs, - } - return ytdl.process_ie_result(info_dict, download=False) + del ytdl_instance.params["__gdl_initialize"] + if self.progress is not None: + ytdl_instance.add_progress_hook(self._progress_hook) + if rlf := ytdl_instance.params.pop("__gdl_ratelimit_func", False): + self.rate_dyn = rlf + ytdl_instance.params["logger"] = LoggerAdapter(self, ytdl_instance) def _progress_hook(self, info): if info["status"] == "downloading" and \ @@ -284,6 +336,31 @@ class YoutubeDLDownloader(DownloaderBase): ytdl_instance.params["outtmpl"] = {"default": outtmpl} +class LoggerAdapter(): + __slots__ = ("obj", "log") + + def __init__(self, obj, ytdl_instance): + self.obj = obj + self.log = ytdl_instance.params.get("logger") + + def debug(self, msg): + if self.log is not None: + if msg[0] == "[": + msg = msg[msg.find("]")+2:] + self.log.debug(msg) + + def warning(self, msg): + if self.log is not None: + if "WARNING:" in msg: + msg = msg[msg.find(" ")+1:] + self.log.warning(msg) + + def error(self, msg): + if "ERROR:" in msg: + msg = msg[msg.find(" ")+1:] + self.obj.error = msg + + def compatible_formats(formats): """Returns True if 'formats' are compatible for merge""" video_ext = formats[0].get("ext") diff --git a/gallery_dl/dt.py b/gallery_dl/dt.py new file mode 100644 index 0000000..b37ebf3 --- /dev/null +++ b/gallery_dl/dt.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright 2025 Mike Fährmann +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
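The hunk that starts here adds gallery_dl/dt.py, a small date/time helper module: every value it hands back is a naive datetime in UTC, and parse failures return the falsy dt.NONE sentinel instead of raising. A minimal usage sketch, assuming only the API defined in this hunk:

    from gallery_dl import dt

    d = dt.parse_iso("2025-12-20T05:49:04Z")    # -> naive UTC datetime
    if not d:                                   # NullDatetime is falsy
        d = dt.parse_ts("1766209744")           # fall back to a Unix timestamp
    assert dt.to_ts_string(d) == "1766209744"   # integer-second round-trip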
+ +"""Date/Time utilities""" + +import sys +import time +from datetime import datetime, date, timedelta, timezone # noqa F401 + + +class NullDatetime(datetime): + + def __bool__(self): + return False + + def __str__(self): + return "[Invalid DateTime]" + + def __format__(self, format_spec): + return "[Invalid DateTime]" + + +NONE = NullDatetime(1, 1, 1) +EPOCH = datetime(1970, 1, 1) +SECOND = timedelta(0, 1) + + +def normalize(dt): + # if (o := dt.utcoffset()) is not None: + # return dt.replace(tzinfo=None, microsecond=0) - o + if dt.tzinfo is not None: + return dt.astimezone(timezone.utc).replace(tzinfo=None, microsecond=0) + if dt.microsecond: + return dt.replace(microsecond=0) + return dt + + +def convert(value): + """Convert 'value' to a naive UTC datetime object""" + if not value: + return NONE + if isinstance(value, datetime): + return normalize(value) + if isinstance(value, str) and (dt := parse_iso(value)) is not NONE: + return dt + return parse_ts(value) + + +def parse(dt_string, format): + """Parse 'dt_string' according to 'format'""" + try: + return normalize(datetime.strptime(dt_string, format)) + except Exception: + return NONE + + +if sys.hexversion < 0x30c0000: + # Python <= 3.11 + def parse_iso(dt_string): + """Parse 'dt_string' as ISO 8601 value""" + try: + if dt_string[-1] == "Z": + # compat for Python < 3.11 + dt_string = dt_string[:-1] + elif dt_string[-5] in "+-": + # compat for Python < 3.11 + dt_string = f"{dt_string[:-2]}:{dt_string[-2:]}" + return normalize(datetime.fromisoformat(dt_string)) + except Exception: + return NONE + + from_ts = datetime.utcfromtimestamp + now = datetime.utcnow + +else: + # Python >= 3.12 + def parse_iso(dt_string): + """Parse 'dt_string' as ISO 8601 value""" + try: + return normalize(datetime.fromisoformat(dt_string)) + except Exception: + return NONE + + def from_ts(ts=None): + """Convert Unix timestamp to naive UTC datetime""" + Y, m, d, H, M, S, _, _, _ = time.gmtime(ts) + return datetime(Y, m, d, H, M, S) + + now = from_ts + + +def parse_ts(ts, default=NONE): + """Create a datetime object from a Unix timestamp""" + try: + return from_ts(int(ts)) + except Exception: + return default + + +def to_ts(dt): + """Convert naive UTC datetime to Unix timestamp""" + return (dt - EPOCH) / SECOND + + +def to_ts_string(dt): + """Convert naive UTC datetime to Unix timestamp string""" + try: + return str((dt - EPOCH) // SECOND) + except Exception: + return "" diff --git a/gallery_dl/extractor/2ch.py b/gallery_dl/extractor/2ch.py index 912a251..1f17c99 100644 --- a/gallery_dl/extractor/2ch.py +++ b/gallery_dl/extractor/2ch.py @@ -4,28 +4,28 @@ # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. -"""Extractors for https://2ch.su/""" +"""Extractors for https://2ch.org/""" from .common import Extractor, Message from .. import text, util -BASE_PATTERN = r"(?:https?://)?2ch\.(su|life|hk)" +BASE_PATTERN = r"(?:https?://)?2ch\.(org|su|life|hk)" class _2chThreadExtractor(Extractor): """Extractor for 2ch threads""" category = "2ch" subcategory = "thread" - root = "https://2ch.su" + root = "https://2ch.org" directory_fmt = ("{category}", "{board}", "{thread} {title}") filename_fmt = "{tim}{filename:? 
//}.{extension}" archive_fmt = "{board}_{thread}_{tim}" pattern = rf"{BASE_PATTERN}/([^/?#]+)/res/(\d+)" - example = "https://2ch.su/a/res/12345.html" + example = "https://2ch.org/a/res/12345.html" def __init__(self, match): tld = match[1] - self.root = f"https://2ch.{'su' if tld == 'hk' else tld}" + self.root = f"https://2ch.{'org' if tld == 'hk' else tld}" Extractor.__init__(self, match) def items(self): @@ -42,11 +42,11 @@ class _2chThreadExtractor(Extractor): "title" : text.unescape(title)[:50], } - yield Message.Directory, thread + yield Message.Directory, "", thread for post in posts: if files := post.get("files"): post["post_name"] = post["name"] - post["date"] = text.parse_timestamp(post["timestamp"]) + post["date"] = self.parse_timestamp(post["timestamp"]) del post["files"] del post["name"] @@ -65,9 +65,9 @@ class _2chBoardExtractor(Extractor): """Extractor for 2ch boards""" category = "2ch" subcategory = "board" - root = "https://2ch.su" + root = "https://2ch.org" pattern = rf"{BASE_PATTERN}/([^/?#]+)/?$" - example = "https://2ch.su/a/" + example = "https://2ch.org/a/" def __init__(self, match): tld = match[1] diff --git a/gallery_dl/extractor/2chan.py b/gallery_dl/extractor/2chan.py index 9927b5a..0e250c9 100644 --- a/gallery_dl/extractor/2chan.py +++ b/gallery_dl/extractor/2chan.py @@ -31,7 +31,7 @@ class _2chanThreadExtractor(Extractor): f"/{self.board}/res/{self.thread}.htm") page = self.request(url).text data = self.metadata(page) - yield Message.Directory, data + yield Message.Directory, "", data for post in self.posts(page): if "filename" not in post: continue diff --git a/gallery_dl/extractor/2chen.py b/gallery_dl/extractor/2chen.py index ee3510c..4456fd6 100644 --- a/gallery_dl/extractor/2chen.py +++ b/gallery_dl/extractor/2chen.py @@ -1,40 +1,55 @@ # -*- coding: utf-8 -*- +# Copyright 2022-2025 Mike Fährmann +# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. -"""Extractors for https://sturdychan.help/""" +"""Extractors for 2chen boards""" -from .common import Extractor, Message +from .common import BaseExtractor, Message from .. 
import text -BASE_PATTERN = r"(?:https?://)?(?:sturdychan.help|2chen\.(?:moe|club))" +class _2chenExtractor(BaseExtractor): + basecategory = "2chen" -class _2chenThreadExtractor(Extractor): + +BASE_PATTERN = _2chenExtractor.update({ + "sturdychan": { + "root": "https://sturdychan.help", + "pattern": r"(?:sturdychan\.help|2chen\.(?:moe|club))", + }, + "schan": { + "root": "https://schan.help/", + "pattern": r"schan\.help", + }, +}) + + +class _2chenThreadExtractor(_2chenExtractor): """Extractor for 2chen threads""" - category = "2chen" subcategory = "thread" - root = "https://sturdychan.help" directory_fmt = ("{category}", "{board}", "{thread} {title}") filename_fmt = "{time} {filename}.{extension}" - archive_fmt = "{board}_{thread}_{hash}_{time}" - pattern = BASE_PATTERN + r"/([^/?#]+)/(\d+)" + archive_fmt = "{board}_{thread}_{no}_{time}" + pattern = rf"{BASE_PATTERN}/([^/?#]+)/(\d+)" example = "https://sturdychan.help/a/12345/" - def __init__(self, match): - Extractor.__init__(self, match) - self.board, self.thread = match.groups() - def items(self): - url = f"{self.root}/{self.board}/{self.thread}" + board = self.groups[-2] + thread = self.kwdict["thread"] = self.groups[-1] + url = f"{self.root}/{board}/{thread}" page = self.request(url, encoding="utf-8", notfound="thread").text - data = self.metadata(page) - yield Message.Directory, data - for post in self.posts(page): + self.kwdict["board"], pos = text.extract( + page, 'class="board">/', '/<') + self.kwdict["title"] = text.unescape(text.extract( + page, "
<h3>", "</h3>
", pos)[0]) + yield Message.Directory, "", {} + for post in self.posts(page): url = post["url"] if not url: continue @@ -42,20 +57,10 @@ class _2chenThreadExtractor(Extractor): url = self.root + url post["url"] = url = url.partition("?")[0] - post.update(data) post["time"] = text.parse_int(post["date"].timestamp()) yield Message.Url, url, text.nameext_from_url( post["filename"], post) - def metadata(self, page): - board, pos = text.extract(page, 'class="board">/', '/<') - title = text.extract(page, "
<h3>", "</h3>
", pos)[0] - return { - "board" : board, - "thread": self.thread, - "title" : text.unescape(title), - } - def posts(self, page): """Return iterable with relevant posts""" return map(self.parse, text.extract_iter( @@ -65,31 +70,25 @@ class _2chenThreadExtractor(Extractor): extr = text.extract_from(post) return { "name" : text.unescape(extr("", "")), - "date" : text.parse_datetime( + "date" : self.parse_datetime( extr("")[2], "%d %b %Y (%a) %H:%M:%S" ), "no" : extr('href="#p', '"'), - "url" : extr('", "', ""), - "date": text.parse_datetime( + "date": self.parse_datetime_iso( (extr('class="dateTime">', "<") or - extr('class="dateTime postNum" >', "<")).strip(), - "%Y-%m-%d %H:%M:%S"), + extr('class="dateTime postNum" >', "<")).strip()), "no" : text.parse_int(extr(">Post No.", "<")), } if 'class="file"' in post: diff --git a/gallery_dl/extractor/4chan.py b/gallery_dl/extractor/4chan.py index d81f305..ba24899 100644 --- a/gallery_dl/extractor/4chan.py +++ b/gallery_dl/extractor/4chan.py @@ -38,7 +38,7 @@ class _4chanThreadExtractor(Extractor): "title" : text.unescape(title)[:50], } - yield Message.Directory, data + yield Message.Directory, "", data for post in posts: if "filename" in post: post.update(data) diff --git a/gallery_dl/extractor/4chanarchives.py b/gallery_dl/extractor/4chanarchives.py index c187b41..16f4b39 100644 --- a/gallery_dl/extractor/4chanarchives.py +++ b/gallery_dl/extractor/4chanarchives.py @@ -40,7 +40,7 @@ class _4chanarchivesThreadExtractor(Extractor): for post in posts: post.update(data) - yield Message.Directory, post + yield Message.Directory, "", post if "url" in post: yield Message.Url, post["url"], post diff --git a/gallery_dl/extractor/500px.py b/gallery_dl/extractor/500px.py index d1ac503..b74bc90 100644 --- a/gallery_dl/extractor/500px.py +++ b/gallery_dl/extractor/500px.py @@ -31,7 +31,7 @@ class _500pxExtractor(Extractor): photo["extension"] = photo["image_format"] if data: photo.update(data) - yield Message.Directory, photo + yield Message.Directory, "", photo yield Message.Url, url, photo def metadata(self): @@ -92,7 +92,7 @@ class _500pxExtractor(Extractor): class _500pxUserExtractor(_500pxExtractor): """Extractor for photos from a user's photostream on 500px.com""" subcategory = "user" - pattern = BASE_PATTERN + r"/(?!photo/|liked)(?:p/)?([^/?#]+)/?(?:$|[?#])" + pattern = rf"{BASE_PATTERN}/(?!photo/|liked)(?:p/)?([^/?#]+)/?(?:$|[?#])" example = "https://500px.com/USER" def __init__(self, match): @@ -121,8 +121,8 @@ class _500pxGalleryExtractor(_500pxExtractor): """Extractor for photo galleries on 500px.com""" subcategory = "gallery" directory_fmt = ("{category}", "{user[username]}", "{gallery[name]}") - pattern = (BASE_PATTERN + r"/(?!photo/)(?:p/)?" - r"([^/?#]+)/galleries/([^/?#]+)") + pattern = (rf"{BASE_PATTERN}/(?!photo/)(?:p/)?" 
+ rf"([^/?#]+)/galleries/([^/?#]+)") example = "https://500px.com/USER/galleries/GALLERY" def __init__(self, match): @@ -178,7 +178,7 @@ class _500pxGalleryExtractor(_500pxExtractor): class _500pxFavoriteExtractor(_500pxExtractor): """Extractor for favorite 500px photos""" subcategory = "favorite" - pattern = BASE_PATTERN + r"/liked/?$" + pattern = rf"{BASE_PATTERN}/liked/?$" example = "https://500px.com/liked" def photos(self): @@ -202,7 +202,7 @@ class _500pxFavoriteExtractor(_500pxExtractor): class _500pxImageExtractor(_500pxExtractor): """Extractor for individual images from 500px.com""" subcategory = "image" - pattern = BASE_PATTERN + r"/photo/(\d+)" + pattern = rf"{BASE_PATTERN}/photo/(\d+)" example = "https://500px.com/photo/12345/TITLE" def __init__(self, match): diff --git a/gallery_dl/extractor/8chan.py b/gallery_dl/extractor/8chan.py index 0385067..3230182 100644 --- a/gallery_dl/extractor/8chan.py +++ b/gallery_dl/extractor/8chan.py @@ -9,9 +9,8 @@ """Extractors for https://8chan.moe/""" from .common import Extractor, Message -from .. import text, util +from .. import text, dt from ..cache import memcache -from datetime import timedelta import itertools BASE_PATTERN = r"(?:https?://)?8chan\.(moe|se|cc)" @@ -44,7 +43,7 @@ class _8chanExtractor(Extractor): def cookies_prepare(self): # fetch captcha cookies # (necessary to download without getting interrupted) - now = util.datetime_utcnow() + now = dt.now() url = self.root + "/captcha.js" params = {"d": now.strftime("%a %b %d %Y %H:%M:%S GMT+0000 (UTC)")} self.request(url, params=params).content @@ -57,7 +56,7 @@ class _8chanExtractor(Extractor): if cookie.domain.endswith(domain): cookie.expires = None if cookie.name == "captchaexpiration": - cookie.value = (now + timedelta(30, 300)).strftime( + cookie.value = (now + dt.timedelta(30, 300)).strftime( "%a, %d %b %Y %H:%M:%S GMT") return self.cookies @@ -70,7 +69,7 @@ class _8chanThreadExtractor(_8chanExtractor): "{threadId} {subject[:50]}") filename_fmt = "{postId}{num:?-//} {filename[:200]}.{extension}" archive_fmt = "{boardUri}_{postId}_{num}" - pattern = BASE_PATTERN + r"/([^/?#]+)/(?:res|last)/(\d+)" + pattern = rf"{BASE_PATTERN}/([^/?#]+)/(?:res|last)/(\d+)" example = "https://8chan.moe/a/res/12345.html" def items(self): @@ -92,7 +91,7 @@ class _8chanThreadExtractor(_8chanExtractor): # download files posts = thread.pop("posts", ()) - yield Message.Directory, thread + yield Message.Directory, "", thread for post in itertools.chain((thread,), posts): files = post.pop("files", ()) if not files: @@ -108,7 +107,7 @@ class _8chanThreadExtractor(_8chanExtractor): class _8chanBoardExtractor(_8chanExtractor): """Extractor for 8chan boards""" subcategory = "board" - pattern = BASE_PATTERN + r"/([^/?#]+)/(?:(\d+)\.html)?$" + pattern = rf"{BASE_PATTERN}/([^/?#]+)/(?:(\d+)\.html)?$" example = "https://8chan.moe/a/" def items(self): diff --git a/gallery_dl/extractor/8muses.py b/gallery_dl/extractor/8muses.py index 120cd8a..a8d8b44 100644 --- a/gallery_dl/extractor/8muses.py +++ b/gallery_dl/extractor/8muses.py @@ -40,7 +40,7 @@ class _8musesAlbumExtractor(Extractor): if images := data.get("pictures"): count = len(images) album = self._make_album(data["album"]) - yield Message.Directory, {"album": album, "count": count} + yield Message.Directory, "", {"album": album, "count": count} for num, image in enumerate(images, 1): url = self.root + "/image/fl/" + image["publicUri"] img = { @@ -85,8 +85,7 @@ class _8musesAlbumExtractor(Extractor): "parent" : text.parse_int(album["parentId"]), "views" : 
text.parse_int(album["numberViews"]), "likes" : text.parse_int(album["numberLikes"]), - "date" : text.parse_datetime( - album["updatedAt"], "%Y-%m-%dT%H:%M:%S.%fZ"), + "date" : self.parse_datetime_iso(album["updatedAt"]), } def _unobfuscate(self, data): diff --git a/gallery_dl/extractor/__init__.py b/gallery_dl/extractor/__init__.py index c7e33c8..64134d0 100644 --- a/gallery_dl/extractor/__init__.py +++ b/gallery_dl/extractor/__init__.py @@ -26,8 +26,10 @@ modules = [ "ao3", "arcalive", "architizer", + "arena", "artstation", "aryion", + "audiochan", "batoto", "bbc", "behance", @@ -39,9 +41,11 @@ modules = [ "booth", "bunkr", "catbox", + "cfake", "chevereto", "cien", "civitai", + "comedywildlifephoto", "comick", "comicvine", "cyberdrop", @@ -54,6 +58,7 @@ modules = [ "discord", "dynastyscans", "e621", + "eporner", "erome", "everia", "exhentai", @@ -63,6 +68,8 @@ modules = [ "fantia", "fapello", "fapachi", + "fikfap", + "fitnakedgirls", "flickr", "furaffinity", "furry34", @@ -106,6 +113,7 @@ modules = [ "kemono", "khinsider", "komikcast", + "koofr", "leakgallery", "lensdump", "lexica", @@ -140,12 +148,14 @@ modules = [ "nozomi", "nsfwalbum", "nudostar", + "okporn", "paheal", "patreon", "pexels", "philomena", "photovogue", "picarto", + "picazor", "pictoa", "piczel", "pillowfort", @@ -158,12 +168,12 @@ modules = [ "poringa", "pornhub", "pornpics", + "pornstarstube", "postmill", "rawkuma", "reactor", "readcomiconline", "realbooru", - "redbust", "reddit", "redgifs", "rule34us", @@ -179,7 +189,6 @@ modules = [ "senmanga", "sexcom", "shimmie2", - "simpcity", "simplyhentai", "sizebooru", "skeb", @@ -190,6 +199,7 @@ modules = [ "speakerdeck", "steamgriddb", "subscribestar", + "sxypix", "szurubooru", "tapas", "tcbscans", @@ -221,11 +231,13 @@ modules = [ "webmshare", "webtoons", "weebcentral", + "weebdex", "weibo", "wikiart", "wikifeet", "wikimedia", "xasiat", + "xenforo", "xfolio", "xhamster", "xvideos", @@ -299,7 +311,7 @@ def _list_classes(): def _modules_internal(): globals_ = globals() for module_name in modules: - yield __import__(module_name, globals_, None, (), 1) + yield __import__(module_name, globals_, None, None, 1) def _modules_path(path, files): diff --git a/gallery_dl/extractor/adultempire.py b/gallery_dl/extractor/adultempire.py index 3249ae6..e9adf97 100644 --- a/gallery_dl/extractor/adultempire.py +++ b/gallery_dl/extractor/adultempire.py @@ -33,7 +33,7 @@ class AdultempireGalleryExtractor(GalleryExtractor): "gallery_id": text.parse_int(self.gallery_id), "title" : text.unescape(extr('title="', '"')), "studio" : extr(">studio", "<").strip(), - "date" : text.parse_datetime(extr( + "date" : self.parse_datetime(extr( ">released", "<").strip(), "%m/%d/%Y"), "actors" : sorted(text.split_html(extr( '