mirror of
https://github.com/unshackle-dl/unshackle.git
synced 2025-10-23 15:11:08 +00:00
Compare commits
31 Commits
1d4e8bf9ec
b4a1f2236e
3277ab0d77
be0f7299f8
948ef30de7
1bd63ddc91
4dff597af2
8dbdde697d
63c697f082
3e0835d9fb
c6c83ee43b
507690834b
f8a58d966b
8d12b735ff
1aaea23669
e3571b9518
b478a00519
24fb8fb00c
63e9a78b2a
a2bfe47993
cf4dc1ce76
40028c81d7
06df10cb58
d61bec4a8c
058bb60502
7583129e8f
4691694d2e
a07345a0a2
091d7335a3
8c798b95c4
46c28fe943
.gitignore (vendored, 1 line changed)

@@ -1,6 +1,7 @@
 # unshackle
 unshackle.yaml
 unshackle.yml
+update_check.json
 *.mkv
 *.mp4
 *.exe
CHANGELOG.md (102 lines changed)

@@ -5,6 +5,108 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.4.0] - 2025-08-05

### Added

- **HLG Transfer Characteristics Preservation**: Enhanced video muxing to preserve HLG color metadata
  - Added automatic detection of HLG video tracks during muxing process
  - Implemented `--color-transfer-characteristics 0:18` argument for mkvmerge when processing HLG content (sketched after this list)
  - Prevents incorrect conversion from HLG (18) to BT.2020 (14) transfer characteristics
  - Ensures proper HLG playback support on compatible hardware without manual editing
- **Original Language Support**: Enhanced language selection with 'orig' keyword support
  - Added support for 'orig' language selector for both video and audio tracks
  - Automatically detects and uses the title's original language when 'orig' is specified
  - Improved language processing logic with better duplicate handling
  - Enhanced help text to document original language selection usage
- **Forced Subtitle Support**: Added option to include forced subtitle tracks
  - New functionality to download and include forced subtitle tracks alongside regular subtitles
- **WebVTT Subtitle Filtering**: Enhanced subtitle processing capabilities
  - Added filtering for unwanted cues in WebVTT subtitles
  - Improved subtitle quality by removing unnecessary metadata
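The mkvmerge flag above is easiest to read in context. A minimal sketch of a muxing call that applies it — the flag and its `0:18` value (track 0, transfer characteristic 18 = HLG) come from the entry above; the invocation and filenames are illustrative, not unshackle's actual muxing code:

```python
import subprocess

# Hypothetical filenames; track 0 is assumed to be the HLG video track.
# Without the flag, the muxed track may be tagged BT.2020 (14) instead of HLG (18).
subprocess.run(
    [
        "mkvmerge",
        "--output", "muxed.mkv",
        "--color-transfer-characteristics", "0:18",
        "video_hlg.hevc",
    ],
    check=True,
)
```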
### Changed

- **DRM Track Decryption**: Improved DRM decryption track selection logic
  - Enhanced `get_drm_for_cdm()` method usage for better DRM-CDM matching
  - Added warning messages when no matching DRM is found for tracks
  - Improved error handling and logging for DRM decryption failures
- **Series Tree Representation**: Enhanced episode tree display formatting
  - Updated series tree to show season breakdown with episode counts
  - Improved visual representation with "S{season}({count})" format
  - Better organization of series information in console output
- **Hybrid Processing UI**: Enhanced extraction and conversion processes
  - Added dynamic spinning bars to follow the rest of the codebase design
  - Improved visual feedback during hybrid HDR processing operations
- **Track Selection Logic**: Enhanced multi-track selection capabilities
  - Fixed track selection to support combining -V, -A, -S flags properly
  - Improved flexibility in selecting multiple track types simultaneously
- **Service Subtitle Support**: Added configuration for services without subtitle support
  - Services can now indicate if they don't support subtitle downloads
  - Prevents unnecessary subtitle download attempts for unsupported services
- **Update Checker**: Enhanced update checking logic and cache handling
  - Improved rate limiting and caching mechanisms for update checks
  - Better performance and reduced API calls to GitHub

### Fixed

- **PlayReady KID Extraction**: Enhanced KID extraction from PSSH data
  - Added base64 support and XML parsing for better KID detection
  - Fixed issue where only one KID was being extracted for certain services
  - Improved multi-KID support for PlayReady protected content
- **Dolby Vision Detection**: Improved DV codec detection across all formats
  - Fixed detection of dvhe.05.06 codec which was not being recognized correctly
  - Enhanced detection logic in Episode and Movie title classes
  - Better support for various Dolby Vision codec variants

## [1.3.0] - 2025-08-03

### Added

- **mp4decrypt Support**: Alternative DRM decryption method using mp4decrypt from Bento4
  - Added `mp4decrypt` binary detection and support in binaries module
  - New `decryption` configuration option in unshackle.yaml for service-specific decryption methods
  - Enhanced PlayReady and Widevine DRM classes with mp4decrypt decryption support
  - Service-specific decryption mapping allows choosing between `shaka` and `mp4decrypt` per service
  - Improved error handling and progress reporting for mp4decrypt operations
- **Scene Naming Configuration**: New `scene_naming` option for controlling file naming conventions
  - Added scene naming logic to movie, episode, and song title classes
  - Configurable through unshackle.yaml to enable/disable scene naming standards (see the sketch after this list)
- **Terminal Cleanup and Signal Handling**: Enhanced console management
  - Implemented proper terminal cleanup on application exit
  - Added signal handling for graceful shutdown in ComfyConsole
- **Configuration Template**: New `unshackle-example.yaml` template file
  - Replaced main `unshackle.yaml` with example template to prevent git conflicts
  - Users can now modify their local config without affecting repository updates
- **Enhanced Credential Management**: Improved CDM and vault configuration
  - Expanded credential management documentation in configuration
  - Enhanced CDM configuration examples and guidelines
- **Video Transfer Standards**: Added `Unspecified_Image` option to Transfer enum
  - Implements ITU-T H.Sup19 standard value 2 for image characteristics
  - Supports still image coding systems and unknown transfer characteristics
- **Update Check Rate Limiting**: Enhanced update checking system
  - Added configurable update check intervals to prevent excessive API calls
  - Improved rate limiting for GitHub API requests
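For reference, a sketch of how the 1.3.0 options could look in unshackle.yaml — the key names come from the entries above and from the `Config` diff further down; the values and top-level placement are assumptions:

```yaml
# Illustrative values; defaults per the Config diff are shaka, true, and 24.
decryption:
  ATVP: mp4decrypt
  default: shaka
scene_naming: true        # false switches to simple spaced names
update_check_interval: 24 # hours between update checks
```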
### Changed

- **DRM Decryption Architecture**: Enhanced decryption system with dual method support
  - Updated `dl.py` to handle service-specific decryption method selection
  - Refactored `Config` class to manage decryption method mapping per service
  - Enhanced DRM decrypt methods with `use_mp4decrypt` parameter for method selection
- **Error Handling**: Improved exception handling in Hybrid class
  - Replaced log.exit calls with ValueError exceptions for better error propagation
  - Enhanced error handling consistency across hybrid processing

### Fixed

- **Proxy Configuration**: Fixed proxy server mapping in configuration
  - Renamed 'servers' to 'server_map' in proxy configuration to resolve Nord/Surfshark naming conflicts
  - Updated configuration structure for better compatibility with proxy providers
- **HTTP Vault**: Improved URL handling and key retrieval logic
  - Fixed URL processing issues in HTTP-based key vaults
  - Enhanced key retrieval reliability and error handling

## [1.2.0] - 2025-07-30

### Added
CONFIG.md (31 lines changed)

@@ -213,6 +213,37 @@ downloader:

The `default` entry is optional. If omitted, `requests` will be used for services not listed.

## decryption (str | dict)

Choose what software to use to decrypt DRM-protected content throughout unshackle where needed.
You may provide a single decryption method globally or a mapping of service tags to decryption methods.

Options:

- `shaka` (default) - Shaka Packager - <https://github.com/shaka-project/shaka-packager>
- `mp4decrypt` - mp4decrypt from Bento4 - <https://github.com/axiomatic-systems/Bento4>

Note that Shaka Packager is the traditional method and works with most services. mp4decrypt is an alternative that may work better with certain services that have specific encryption formats.

Example mapping:

```yaml
decryption:
  ATVP: mp4decrypt
  AMZN: shaka
  default: shaka
```

The `default` entry is optional. If omitted, `shaka` will be used for services not listed.

Simple configuration (single method for all services):

```yaml
decryption: mp4decrypt
```

## filenames (dict)

Override the default filenames used across unshackle.
pyproject.toml:

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "unshackle"
-version = "1.2.0"
+version = "1.4.0"
 description = "Modular Movie, TV, and Music Archival Software."
 authors = [{ name = "unshackle team" }]
 requires-python = ">=3.10,<3.13"
dl.py:

@@ -139,7 +145,13 @@ class dl:
     default=None,
     help="Wanted episodes, e.g. `S01-S05,S07`, `S01E01-S02E03`, `S02-S02E03`, e.t.c, defaults to all.",
 )
-@click.option("-l", "--lang", type=LANGUAGE_RANGE, default="en", help="Language wanted for Video and Audio.")
+@click.option(
+    "-l",
+    "--lang",
+    type=LANGUAGE_RANGE,
+    default="en",
+    help="Language wanted for Video and Audio. Use 'orig' to select the original language, e.g. 'orig,en' for both original and English.",
+)
 @click.option(
     "-vl",
     "--v-lang",
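In practice the new selector reads naturally on the command line: `-l orig,en` requests the title's original language plus English, and `-l orig` just the original (the service tag and title arguments are whatever you would normally pass to `dl`).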
@@ -148,6 +154,7 @@ class dl:
     help="Language wanted for Video, you would use this if the video language doesn't match the audio.",
 )
 @click.option("-sl", "--s-lang", type=LANGUAGE_RANGE, default=["all"], help="Language wanted for Subtitles.")
+@click.option("-fs", "--forced-subs", is_flag=True, default=False, help="Include forced subtitle tracks.")
 @click.option(
     "--proxy",
     type=str,

@@ -405,6 +412,7 @@ class dl:
         lang: list[str],
         v_lang: list[str],
         s_lang: list[str],
+        forced_subs: bool,
         sub_format: Optional[Subtitle.Codec],
         video_only: bool,
         audio_only: bool,

@@ -533,7 +541,12 @@ class dl:
         events.subscribe(events.Types.TRACK_REPACKED, service.on_track_repacked)
         events.subscribe(events.Types.TRACK_MULTIPLEX, service.on_track_multiplex)

-        if no_subs:
+        if hasattr(service, "NO_SUBTITLES") and service.NO_SUBTITLES:
+            console.log("Skipping subtitles - service does not support subtitle downloads")
+            no_subs = True
+            s_lang = None
+            title.tracks.subtitles = []
+        elif no_subs:
             console.log("Skipped subtitles as --no-subs was used...")
             s_lang = None
             title.tracks.subtitles = []

@@ -560,8 +573,31 @@ class dl:
         )

         with console.status("Sorting tracks by language and bitrate...", spinner="dots"):
-            title.tracks.sort_videos(by_language=v_lang or lang)
-            title.tracks.sort_audio(by_language=lang)
+            video_sort_lang = v_lang or lang
+            processed_video_sort_lang = []
+            for language in video_sort_lang:
+                if language == "orig":
+                    if title.language:
+                        orig_lang = str(title.language) if hasattr(title.language, "__str__") else title.language
+                        if orig_lang not in processed_video_sort_lang:
+                            processed_video_sort_lang.append(orig_lang)
+                else:
+                    if language not in processed_video_sort_lang:
+                        processed_video_sort_lang.append(language)
+
+            processed_audio_sort_lang = []
+            for language in lang:
+                if language == "orig":
+                    if title.language:
+                        orig_lang = str(title.language) if hasattr(title.language, "__str__") else title.language
+                        if orig_lang not in processed_audio_sort_lang:
+                            processed_audio_sort_lang.append(orig_lang)
+                else:
+                    if language not in processed_audio_sort_lang:
+                        processed_audio_sort_lang.append(language)
+
+            title.tracks.sort_videos(by_language=processed_video_sort_lang)
+            title.tracks.sort_audio(by_language=processed_audio_sort_lang)
             title.tracks.sort_subtitles(by_language=s_lang)

         if list_:
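The same expand-and-deduplicate pattern recurs in the track-filtering hunks below, so here is a standalone sketch of what it computes — a hypothetical helper, not code from the repo:

```python
# Mirrors the loops above: 'orig' expands to the title's original language
# (when known), and duplicates collapse while preserving order.
def resolve_langs(langs: list[str], title_language: str | None) -> list[str]:
    processed: list[str] = []
    for language in langs:
        resolved = title_language if language == "orig" else language
        if resolved and resolved not in processed:
            processed.append(resolved)
    return processed

assert resolve_langs(["orig", "en"], "ja") == ["ja", "en"]
assert resolve_langs(["orig", "ja"], "ja") == ["ja"]  # duplicate collapses
assert resolve_langs(["orig"], None) == []            # unknown original is skipped
```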
@@ -592,12 +628,27 @@ class dl:
                 self.log.error(f"There's no {vbitrate}kbps Video Track...")
                 sys.exit(1)

-        # Filter out "best" from the video languages list.
         video_languages = [lang for lang in (v_lang or lang) if lang != "best"]
         if video_languages and "all" not in video_languages:
-            title.tracks.videos = title.tracks.by_language(title.tracks.videos, video_languages)
+            processed_video_lang = []
+            for language in video_languages:
+                if language == "orig":
+                    if title.language:
+                        orig_lang = (
+                            str(title.language) if hasattr(title.language, "__str__") else title.language
+                        )
+                        if orig_lang not in processed_video_lang:
+                            processed_video_lang.append(orig_lang)
+                    else:
+                        self.log.warning(
+                            "Original language not available for title, skipping 'orig' selection for video"
+                        )
+                else:
+                    if language not in processed_video_lang:
+                        processed_video_lang.append(language)
+            title.tracks.videos = title.tracks.by_language(title.tracks.videos, processed_video_lang)
         if not title.tracks.videos:
-            self.log.error(f"There's no {video_languages} Video Track...")
+            self.log.error(f"There's no {processed_video_lang} Video Track...")
             sys.exit(1)

         if quality:

@@ -672,6 +723,7 @@ class dl:
             self.log.error(f"There's no {s_lang} Subtitle Track...")
             sys.exit(1)

+        if not forced_subs:
             title.tracks.select_subtitles(lambda x: not x.forced or is_close_match(x.language, lang))

         # filter audio tracks

@@ -699,8 +751,24 @@ class dl:
             self.log.error(f"There's no {abitrate}kbps Audio Track...")
             sys.exit(1)
         if lang:
-            if "best" in lang:
-                # Get unique languages and select highest quality for each
+            processed_lang = []
+            for language in lang:
+                if language == "orig":
+                    if title.language:
+                        orig_lang = (
+                            str(title.language) if hasattr(title.language, "__str__") else title.language
+                        )
+                        if orig_lang not in processed_lang:
+                            processed_lang.append(orig_lang)
+                    else:
+                        self.log.warning(
+                            "Original language not available for title, skipping 'orig' selection"
+                        )
+                else:
+                    if language not in processed_lang:
+                        processed_lang.append(language)
+
+            if "best" in processed_lang:
                 unique_languages = {track.language for track in title.tracks.audio}
                 selected_audio = []
                 for language in unique_languages:

@@ -710,30 +778,36 @@ class dl:
                     )
                     selected_audio.append(highest_quality)
                 title.tracks.audio = selected_audio
-            elif "all" not in lang:
-                title.tracks.audio = title.tracks.by_language(title.tracks.audio, lang, per_language=1)
+            elif "all" not in processed_lang:
+                per_language = 0 if len(processed_lang) > 1 else 1
+                title.tracks.audio = title.tracks.by_language(
+                    title.tracks.audio, processed_lang, per_language=per_language
+                )
             if not title.tracks.audio:
-                self.log.error(f"There's no {lang} Audio Track, cannot continue...")
+                self.log.error(f"There's no {processed_lang} Audio Track, cannot continue...")
                 sys.exit(1)

         if video_only or audio_only or subs_only or chapters_only or no_subs or no_audio or no_chapters:
-            # Determine which track types to keep based on the flags
+            keep_videos = False
+            keep_audio = False
+            keep_subtitles = False
+            keep_chapters = False
+
+            if video_only or audio_only or subs_only or chapters_only:
+                if video_only:
+                    keep_videos = True
+                if audio_only:
+                    keep_audio = True
+                if subs_only:
+                    keep_subtitles = True
+                if chapters_only:
+                    keep_chapters = True
+            else:
                 keep_videos = True
                 keep_audio = True
                 keep_subtitles = True
                 keep_chapters = True

-            # Handle exclusive flags (only keep one type)
-            if video_only:
-                keep_audio = keep_subtitles = keep_chapters = False
-            elif audio_only:
-                keep_videos = keep_subtitles = keep_chapters = False
-            elif subs_only:
-                keep_videos = keep_audio = keep_chapters = False
-            elif chapters_only:
-                keep_videos = keep_audio = keep_subtitles = False
-
-            # Handle exclusion flags (remove specific types)
             if no_subs:
                 keep_subtitles = False
             if no_audio:

@@ -741,7 +815,6 @@ class dl:
             if no_chapters:
                 keep_chapters = False

-            # Build the kept_tracks list without duplicates
             kept_tracks = []
             if keep_videos:
                 kept_tracks.extend(title.tracks.videos)

@@ -838,6 +911,7 @@ class dl:
                 while (
                     not title.tracks.subtitles
                     and not no_subs
+                    and not (hasattr(service, "NO_SUBTITLES") and service.NO_SUBTITLES)
                     and not video_only
                     and len(title.tracks.videos) > video_track_n
                     and any(

@@ -911,6 +985,34 @@ class dl:
             if font_count:
                 self.log.info(f"Attached {font_count} fonts for the Subtitles")

+            # Handle DRM decryption BEFORE repacking (must decrypt first!)
+            service_name = service.__class__.__name__.upper()
+            decryption_method = config.decryption_map.get(service_name, config.decryption)
+            use_mp4decrypt = decryption_method.lower() == "mp4decrypt"
+
+            if use_mp4decrypt:
+                decrypt_tool = "mp4decrypt"
+            else:
+                decrypt_tool = "Shaka Packager"
+
+            drm_tracks = [track for track in title.tracks if track.drm]
+            if drm_tracks:
+                with console.status(f"Decrypting tracks with {decrypt_tool}..."):
+                    has_decrypted = False
+                    for track in drm_tracks:
+                        drm = track.get_drm_for_cdm(self.cdm)
+                        if drm and hasattr(drm, "decrypt"):
+                            drm.decrypt(track.path, use_mp4decrypt=use_mp4decrypt)
+                            has_decrypted = True
+                            events.emit(events.Types.TRACK_REPACKED, track=track)
+                        else:
+                            self.log.warning(
+                                f"No matching DRM found for track {track} with CDM type {type(self.cdm).__name__}"
+                            )
+                    if has_decrypted:
+                        self.log.info(f"Decrypted tracks with {decrypt_tool}")
+
+            # Now repack the decrypted tracks
             with console.status("Repackaging tracks with FFMPEG..."):
                 has_repacked = False
                 for track in title.tracks:
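The method selection above is a plain dict lookup with a global fallback; a tiny sketch with hypothetical values:

```python
# decryption_map comes from unshackle.yaml (keys uppercased by Config);
# config.decryption is the global default. Values here are illustrative.
decryption_map = {"ATVP": "mp4decrypt"}
default_method = "shaka"

for service_name in ("ATVP", "AMZN"):
    method = decryption_map.get(service_name, default_method)
    print(service_name, "->", method)  # ATVP -> mp4decrypt, AMZN -> shaka
```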
env check (dependency table):

@@ -45,6 +45,13 @@ def check() -> None:
             "desc": "DRM decryption",
             "cat": "DRM",
         },
+        {
+            "name": "mp4decrypt",
+            "binary": binaries.Mp4decrypt,
+            "required": False,
+            "desc": "DRM decryption",
+            "cat": "DRM",
+        },
         # HDR Processing
         {"name": "dovi_tool", "binary": binaries.DoviTool, "required": False, "desc": "Dolby Vision", "cat": "HDR"},
         {
version string:

@@ -1 +1 @@
-__version__ = "1.2.0"
+__version__ = "1.4.0"
binaries module:

@@ -53,6 +53,7 @@ MKVToolNix = find("mkvmerge")
 Mkvpropedit = find("mkvpropedit")
 DoviTool = find("dovi_tool")
 HDR10PlusTool = find("hdr10plus_tool", "HDR10Plus_tool")
+Mp4decrypt = find("mp4decrypt")


 __all__ = (

@@ -71,5 +72,6 @@ __all__ = (
     "Mkvpropedit",
     "DoviTool",
     "HDR10PlusTool",
+    "Mp4decrypt",
     "find",
 )
Config class:

@@ -75,10 +75,20 @@ class Config:
         self.proxy_providers: dict = kwargs.get("proxy_providers") or {}
         self.serve: dict = kwargs.get("serve") or {}
         self.services: dict = kwargs.get("services") or {}
+        decryption_cfg = kwargs.get("decryption") or {}
+        if isinstance(decryption_cfg, dict):
+            self.decryption_map = {k.upper(): v for k, v in decryption_cfg.items()}
+            self.decryption = self.decryption_map.get("DEFAULT", "shaka")
+        else:
+            self.decryption_map = {}
+            self.decryption = decryption_cfg or "shaka"
+
         self.set_terminal_bg: bool = kwargs.get("set_terminal_bg", False)
         self.tag: str = kwargs.get("tag") or ""
         self.tmdb_api_key: str = kwargs.get("tmdb_api_key") or ""
         self.update_checks: bool = kwargs.get("update_checks", True)
+        self.update_check_interval: int = kwargs.get("update_check_interval", 24)
+        self.scene_naming: bool = kwargs.get("scene_naming", True)

     @classmethod
     def from_yaml(cls, path: Path) -> Config:
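A standalone mirror of that normalization, showing both accepted config shapes — the function is illustrative, not part of the codebase:

```python
# Same branches as the Config diff above: a mapping is uppercased and its
# "default" entry becomes the global method; a bare string applies everywhere.
def normalize_decryption(decryption_cfg):
    if isinstance(decryption_cfg, dict):
        decryption_map = {k.upper(): v for k, v in decryption_cfg.items()}
        return decryption_map, decryption_map.get("DEFAULT", "shaka")
    return {}, decryption_cfg or "shaka"

assert normalize_decryption({"ATVP": "mp4decrypt", "default": "shaka"}) == (
    {"ATVP": "mp4decrypt", "DEFAULT": "shaka"},
    "shaka",
)
assert normalize_decryption("mp4decrypt") == ({}, "mp4decrypt")
```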
PlayReady DRM class:

@@ -39,7 +39,13 @@ class PlayReady:
         if not isinstance(pssh, PSSH):
             raise TypeError(f"Expected pssh to be a {PSSH}, not {pssh!r}")

-        kids: list[UUID] = []
+        if pssh_b64:
+            kids = self._extract_kids_from_pssh_b64(pssh_b64)
+        else:
+            kids = []
+
+        # Extract KIDs using pyplayready's method (may miss some KIDs)
+        if not kids:
             for header in pssh.wrm_headers:
                 try:
                     signed_ids, _, _, _ = header.read_attributes()

@@ -72,6 +78,66 @@ class PlayReady:
         if pssh_b64:
             self.data.setdefault("pssh_b64", pssh_b64)

+    def _extract_kids_from_pssh_b64(self, pssh_b64: str) -> list[UUID]:
+        """Extract all KIDs from base64-encoded PSSH data."""
+        try:
+            import xml.etree.ElementTree as ET
+
+            # Decode the PSSH
+            pssh_bytes = base64.b64decode(pssh_b64)
+
+            # Try to find XML in the PSSH data
+            # PlayReady PSSH usually has XML embedded in it
+            pssh_str = pssh_bytes.decode("utf-16le", errors="ignore")
+
+            # Find WRMHEADER
+            xml_start = pssh_str.find("<WRMHEADER")
+            if xml_start == -1:
+                # Try UTF-8
+                pssh_str = pssh_bytes.decode("utf-8", errors="ignore")
+                xml_start = pssh_str.find("<WRMHEADER")
+
+            if xml_start != -1:
+                clean_xml = pssh_str[xml_start:]
+                xml_end = clean_xml.find("</WRMHEADER>") + len("</WRMHEADER>")
+                clean_xml = clean_xml[:xml_end]
+
+                root = ET.fromstring(clean_xml)
+                ns = {"pr": "http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader"}
+
+                kids = []
+
+                # Extract from CUSTOMATTRIBUTES/KIDS
+                kid_elements = root.findall(".//pr:CUSTOMATTRIBUTES/pr:KIDS/pr:KID", ns)
+                for kid_elem in kid_elements:
+                    value = kid_elem.get("VALUE")
+                    if value:
+                        try:
+                            kid_bytes = base64.b64decode(value + "==")
+                            kid_uuid = UUID(bytes_le=kid_bytes)
+                            kids.append(kid_uuid)
+                        except Exception:
+                            pass
+
+                # Also get individual KID
+                individual_kids = root.findall(".//pr:DATA/pr:KID", ns)
+                for kid_elem in individual_kids:
+                    if kid_elem.text:
+                        try:
+                            kid_bytes = base64.b64decode(kid_elem.text.strip() + "==")
+                            kid_uuid = UUID(bytes_le=kid_bytes)
+                            if kid_uuid not in kids:
+                                kids.append(kid_uuid)
+                        except Exception:
+                            pass
+
+                return kids
+
+        except Exception:
+            pass
+
+        return []
+
     @classmethod
     def from_track(cls, track: AnyTrack, session: Optional[Session] = None) -> PlayReady:
         if not session:
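To make the two XPath queries concrete, here is a hand-written header in the shape the extractor searches for — illustrative only, not captured from a real service, with pre-padded values so the `+ "=="` padding trick in the method above is not needed:

```python
import base64
import xml.etree.ElementTree as ET
from uuid import UUID

# Both KID locations the extractor checks: CUSTOMATTRIBUTES/KIDS/KID (VALUE
# attribute) and DATA/KID (element text). The KID values are made up.
SAMPLE = (
    '<WRMHEADER xmlns="http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader" version="4.0.0.0">'
    "<DATA>"
    "<KID>AAAAAAAAAAAAAAAAAAAAAA==</KID>"
    "<CUSTOMATTRIBUTES><KIDS>"
    '<KID VALUE="EjRWeJCrze8SNFZ4kKvN7w=="></KID>'
    "</KIDS></CUSTOMATTRIBUTES>"
    "</DATA>"
    "</WRMHEADER>"
)

ns = {"pr": "http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader"}
root = ET.fromstring(SAMPLE)
for elem in root.findall(".//pr:CUSTOMATTRIBUTES/pr:KIDS/pr:KID", ns):
    print(UUID(bytes_le=base64.b64decode(elem.get("VALUE"))))  # from VALUE attribute
for elem in root.findall(".//pr:DATA/pr:KID", ns):
    print(UUID(bytes_le=base64.b64decode(elem.text)))  # from element text
```

PlayReady KIDs are base64 of the GUID in little-endian byte order, hence `bytes_le=` when building the UUID.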
@@ -187,14 +253,69 @@ class PlayReady:
         if not self.content_keys:
             raise PlayReady.Exceptions.EmptyLicense("No Content Keys were within the License")

-    def decrypt(self, path: Path) -> None:
+    def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
+        """
+        Decrypt a Track with PlayReady DRM.
+
+        Args:
+            path: Path to the encrypted file to decrypt
+            use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager
+
+        Raises:
+            EnvironmentError if the required decryption executable could not be found.
+            ValueError if the track has not yet been downloaded.
+            SubprocessError if the decryption process returned a non-zero exit code.
+        """
         if not self.content_keys:
             raise ValueError("Cannot decrypt a Track without any Content Keys...")
-        if not binaries.ShakaPackager:
-            raise EnvironmentError("Shaka Packager executable not found but is required.")
         if not path or not path.exists():
             raise ValueError("Tried to decrypt a file that does not exist.")

+        if use_mp4decrypt:
+            return self._decrypt_with_mp4decrypt(path)
+        else:
+            return self._decrypt_with_shaka_packager(path)
+
+    def _decrypt_with_mp4decrypt(self, path: Path) -> None:
+        """Decrypt using mp4decrypt"""
+        if not binaries.Mp4decrypt:
+            raise EnvironmentError("mp4decrypt executable not found but is required.")
+
+        output_path = path.with_stem(f"{path.stem}_decrypted")
+
+        # Build key arguments
+        key_args = []
+        for kid, key in self.content_keys.items():
+            kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
+            key_hex = key if isinstance(key, str) else key.hex()
+            key_args.extend(["--key", f"{kid_hex}:{key_hex}"])
+
+        cmd = [
+            str(binaries.Mp4decrypt),
+            "--show-progress",
+            *key_args,
+            str(path),
+            str(output_path),
+        ]
+
+        try:
+            subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+        except subprocess.CalledProcessError as e:
+            error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
+            raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)
+
+        if not output_path.exists():
+            raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
+        if output_path.stat().st_size == 0:
+            raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")
+
+        path.unlink()
+        shutil.move(output_path, path)
+
+    def _decrypt_with_shaka_packager(self, path: Path) -> None:
+        """Decrypt using Shaka Packager (original method)"""
+        if not binaries.ShakaPackager:
+            raise EnvironmentError("Shaka Packager executable not found but is required.")
+
         output_path = path.with_stem(f"{path.stem}_decrypted")
         config.directories.temp.mkdir(parents=True, exist_ok=True)
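The resulting command line has a simple shape — one `--key kid:key` pair per content key, with KID and key as 32-character hex strings. Placeholder values:

```python
# Shape of the invocation built by _decrypt_with_mp4decrypt; hex values are placeholders.
cmd = [
    "mp4decrypt",
    "--show-progress",
    "--key", "0123456789abcdef0123456789abcdef:00112233445566778899aabbccddeeff",
    "encrypted.mp4",
    "decrypted.mp4",
]
```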
Widevine DRM class:

@@ -227,22 +227,69 @@ class Widevine:
         finally:
             cdm.close(session_id)

-    def decrypt(self, path: Path) -> None:
+    def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
         """
         Decrypt a Track with Widevine DRM.

+        Args:
+            path: Path to the encrypted file to decrypt
+            use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager
+
         Raises:
-            EnvironmentError if the Shaka Packager executable could not be found.
+            EnvironmentError if the required decryption executable could not be found.
             ValueError if the track has not yet been downloaded.
-            SubprocessError if Shaka Packager returned a non-zero exit code.
+            SubprocessError if the decryption process returned a non-zero exit code.
         """
         if not self.content_keys:
             raise ValueError("Cannot decrypt a Track without any Content Keys...")

-        if not binaries.ShakaPackager:
-            raise EnvironmentError("Shaka Packager executable not found but is required.")
         if not path or not path.exists():
             raise ValueError("Tried to decrypt a file that does not exist.")

+        if use_mp4decrypt:
+            return self._decrypt_with_mp4decrypt(path)
+        else:
+            return self._decrypt_with_shaka_packager(path)
+
+    def _decrypt_with_mp4decrypt(self, path: Path) -> None:
+        """Decrypt using mp4decrypt"""
+        if not binaries.Mp4decrypt:
+            raise EnvironmentError("mp4decrypt executable not found but is required.")
+
+        output_path = path.with_stem(f"{path.stem}_decrypted")
+
+        # Build key arguments
+        key_args = []
+        for kid, key in self.content_keys.items():
+            kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
+            key_hex = key if isinstance(key, str) else key.hex()
+            key_args.extend(["--key", f"{kid_hex}:{key_hex}"])
+
+        cmd = [
+            str(binaries.Mp4decrypt),
+            "--show-progress",
+            *key_args,
+            str(path),
+            str(output_path),
+        ]
+
+        try:
+            subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+        except subprocess.CalledProcessError as e:
+            error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
+            raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)
+
+        if not output_path.exists():
+            raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
+        if output_path.stat().st_size == 0:
+            raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")
+
+        path.unlink()
+        shutil.move(output_path, path)
+
+    def _decrypt_with_shaka_packager(self, path: Path) -> None:
+        """Decrypt using Shaka Packager (original method)"""
+        if not binaries.ShakaPackager:
+            raise EnvironmentError("Shaka Packager executable not found but is required.")
+
         output_path = path.with_stem(f"{path.stem}_decrypted")
         config.directories.temp.mkdir(parents=True, exist_ok=True)
Episode and Series titles:

@@ -107,10 +107,13 @@ class Episode(Title):
             name=self.name or "",
         ).strip()

+        if config.scene_naming:
             # Resolution
             if primary_video_track:
                 resolution = primary_video_track.height
-                aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
+                aspect_ratio = [
+                    int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
+                ]
                 if len(aspect_ratio) == 1:
                     # e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
                     aspect_ratio.append(1)

@@ -144,7 +147,9 @@ class Episode(Title):
                 codec = primary_audio_track.format
                 channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
                 if channel_layout:
-                    channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
+                    channels = float(
+                        sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
+                    )
                 else:
                     channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
                     channels = float(channel_count)

@@ -158,12 +163,16 @@ class Episode(Title):
             if primary_video_track:
                 codec = primary_video_track.format
                 hdr_format = primary_video_track.hdr_format_commercial
-                trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
+                trc = (
+                    primary_video_track.transfer_characteristics
+                    or primary_video_track.transfer_characteristics_original
+                )
                 frame_rate = float(primary_video_track.frame_rate)
                 if hdr_format:
                     if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
-                        if (primary_video_track.hdr_format_commercial) != "Dolby Vision":
-                            name += f" DV {DYNAMIC_RANGE_MAP.get(hdr_format)} "
+                        name += " DV"
+                        if DYNAMIC_RANGE_MAP.get(hdr_format) and DYNAMIC_RANGE_MAP.get(hdr_format) != "DV":
+                            name += " HDR"
                     else:
                         name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                 elif trc and "HLG" in trc:

@@ -176,6 +185,9 @@ class Episode(Title):
                 name += f"-{config.tag}"

             return sanitize_filename(name)
+        else:
+            # Simple naming style without technical details - use spaces instead of dots
+            return sanitize_filename(name, " ")


 class Series(SortedKeyList, ABC):

@@ -190,9 +202,10 @@ class Series(SortedKeyList, ABC):
     def tree(self, verbose: bool = False) -> Tree:
         seasons = Counter(x.season for x in self)
         num_seasons = len(seasons)
-        num_episodes = sum(seasons.values())
+        sum(seasons.values())
+        season_breakdown = ", ".join(f"S{season}({count})" for season, count in sorted(seasons.items()))
         tree = Tree(
-            f"{num_seasons} Season{['s', ''][num_seasons == 1]}, {num_episodes} Episode{['s', ''][num_episodes == 1]}",
+            f"{num_seasons} seasons, {season_breakdown}",
             guide_style="bright_black",
         )
         if verbose:
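A quick illustration of the new tree label with hypothetical episode counts:

```python
from collections import Counter

# Two seasons with 8 and 10 episodes (made-up numbers).
seasons = Counter({1: 8, 2: 10})
season_breakdown = ", ".join(f"S{season}({count})" for season, count in sorted(seasons.items()))
print(f"{len(seasons)} seasons, {season_breakdown}")  # -> 2 seasons, S1(8), S2(10)
```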
Movie titles:

@@ -58,10 +58,13 @@ class Movie(Title):
         # Name (Year)
         name = str(self).replace("$", "S")  # e.g., Arli$$

+        if config.scene_naming:
             # Resolution
             if primary_video_track:
                 resolution = primary_video_track.height
-                aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
+                aspect_ratio = [
+                    int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
+                ]
                 if len(aspect_ratio) == 1:
                     # e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
                     aspect_ratio.append(1)

@@ -95,7 +98,9 @@ class Movie(Title):
                 codec = primary_audio_track.format
                 channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
                 if channel_layout:
-                    channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
+                    channels = float(
+                        sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
+                    )
                 else:
                     channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
                     channels = float(channel_count)

@@ -109,12 +114,16 @@ class Movie(Title):
             if primary_video_track:
                 codec = primary_video_track.format
                 hdr_format = primary_video_track.hdr_format_commercial
-                trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
+                trc = (
+                    primary_video_track.transfer_characteristics
+                    or primary_video_track.transfer_characteristics_original
+                )
                 frame_rate = float(primary_video_track.frame_rate)
                 if hdr_format:
                     if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
-                        if (primary_video_track.hdr_format_commercial) != "Dolby Vision":
-                            name += f" DV {DYNAMIC_RANGE_MAP.get(hdr_format)} "
+                        name += " DV"
+                        if DYNAMIC_RANGE_MAP.get(hdr_format) and DYNAMIC_RANGE_MAP.get(hdr_format) != "DV":
+                            name += " HDR"
                     else:
                         name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                 elif trc and "HLG" in trc:

@@ -127,6 +136,9 @@ class Movie(Title):
                 name += f"-{config.tag}"

             return sanitize_filename(name)
+        else:
+            # Simple naming style without technical details - use spaces instead of dots
+            return sanitize_filename(name, " ")


 class Movies(SortedKeyList, ABC):
Song titles:

@@ -100,6 +100,7 @@ class Song(Title):
         # NN. Song Name
         name = str(self).split(" / ")[1]

+        if config.scene_naming:
             # Service
             if show_service:
                 name += f" {self.service.__name__}"

@@ -116,6 +117,9 @@ class Song(Title):
                 name += f"-{config.tag}"

             return sanitize_filename(name, " ")
+        else:
+            # Simple naming style without technical details
+            return sanitize_filename(name, " ")


 class Album(SortedKeyList, ABC):
@@ -43,7 +43,7 @@ class Hybrid:
|
|||||||
|
|
||||||
for video in self.videos:
|
for video in self.videos:
|
||||||
if not video.path or not os.path.exists(video.path):
|
if not video.path or not os.path.exists(video.path):
|
||||||
self.log.exit(f" - Video track {video.id} was not downloaded before injection.")
|
raise ValueError(f"Video track {video.id} was not downloaded before injection.")
|
||||||
|
|
||||||
# Check if we have DV track available
|
# Check if we have DV track available
|
||||||
has_dv = any(video.range == Video.Range.DV for video in self.videos)
|
has_dv = any(video.range == Video.Range.DV for video in self.videos)
|
||||||
@@ -51,14 +51,14 @@ class Hybrid:
|
|||||||
has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)
|
has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)
|
||||||
|
|
||||||
if not has_hdr10:
|
if not has_hdr10:
|
||||||
self.log.exit(" - No HDR10 track available for hybrid processing.")
|
raise ValueError("No HDR10 track available for hybrid processing.")
|
||||||
|
|
||||||
# If we have HDR10+ but no DV, we can convert HDR10+ to DV
|
# If we have HDR10+ but no DV, we can convert HDR10+ to DV
|
||||||
if not has_dv and has_hdr10p:
|
if not has_dv and has_hdr10p:
|
||||||
self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
|
self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
|
||||||
self.hdr10plus_to_dv = True
|
self.hdr10plus_to_dv = True
|
||||||
elif not has_dv:
|
elif not has_dv:
|
||||||
self.log.exit(" - No DV track available and no HDR10+ to convert.")
|
raise ValueError("No DV track available and no HDR10+ to convert.")
|
||||||
|
|
||||||
if os.path.isfile(config.directories.temp / self.hevc_file):
|
if os.path.isfile(config.directories.temp / self.hevc_file):
|
||||||
self.log.info("✓ Already Injected")
|
self.log.info("✓ Already Injected")
|
||||||
@@ -68,7 +68,7 @@ class Hybrid:
|
|||||||
# Use the actual path from the video track
|
# Use the actual path from the video track
|
||||||
save_path = video.path
|
save_path = video.path
|
||||||
if not save_path or not os.path.exists(save_path):
|
if not save_path or not os.path.exists(save_path):
|
||||||
self.log.exit(f" - Video track {video.id} was not downloaded or path not found: {save_path}")
|
raise ValueError(f"Video track {video.id} was not downloaded or path not found: {save_path}")
|
||||||
|
|
||||||
if video.range == Video.Range.HDR10:
|
if video.range == Video.Range.HDR10:
|
||||||
self.extract_stream(save_path, "HDR10")
|
self.extract_stream(save_path, "HDR10")
|
||||||
@@ -126,8 +126,7 @@ class Hybrid:
|
|||||||
def extract_stream(self, save_path, type_):
|
def extract_stream(self, save_path, type_):
|
||||||
output = Path(config.directories.temp / f"{type_}.hevc")
|
output = Path(config.directories.temp / f"{type_}.hevc")
|
||||||
|
|
||||||
self.log.info(f"+ Extracting {type_} stream")
|
with console.status(f"Extracting {type_} stream...", spinner="dots"):
|
||||||
|
|
||||||
returncode = self.ffmpeg_simple(save_path, output)
|
returncode = self.ffmpeg_simple(save_path, output)
|
||||||
|
|
||||||
if returncode:
|
if returncode:
|
||||||
@@ -135,14 +134,17 @@ class Hybrid:
|
|||||||
self.log.error(f"x Failed extracting {type_} stream")
|
self.log.error(f"x Failed extracting {type_} stream")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
self.log.info(f"Extracted {type_} stream")
|
||||||
|
|
||||||
def extract_rpu(self, video, untouched=False):
|
def extract_rpu(self, video, untouched=False):
|
||||||
if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
|
if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
|
||||||
config.directories.temp / "RPU_UNT.bin"
|
config.directories.temp / "RPU_UNT.bin"
|
||||||
):
|
):
|
||||||
return
|
return
|
||||||
|
|
||||||
self.log.info(f"+ Extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
with console.status(
|
||||||
|
f"Extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream...", spinner="dots"
|
||||||
|
):
|
||||||
extraction_args = [str(DoviTool)]
|
extraction_args = [str(DoviTool)]
|
||||||
if not untouched:
|
if not untouched:
|
||||||
extraction_args += ["-m", "3"]
|
extraction_args += ["-m", "3"]
|
||||||
@@ -164,9 +166,11 @@ class Hybrid:
|
|||||||
if b"MAX_PQ_LUMINANCE" in rpu_extraction.stderr:
|
if b"MAX_PQ_LUMINANCE" in rpu_extraction.stderr:
|
||||||
self.extract_rpu(video, untouched=True)
|
self.extract_rpu(video, untouched=True)
|
||||||
elif b"Invalid PPS index" in rpu_extraction.stderr:
|
elif b"Invalid PPS index" in rpu_extraction.stderr:
|
||||||
self.log.exit("x Dolby Vision VideoTrack seems to be corrupt")
|
raise ValueError("Dolby Vision VideoTrack seems to be corrupt")
|
||||||
else:
|
else:
|
||||||
self.log.exit(f"x Failed extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
raise ValueError(f"Failed extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
||||||
|
|
||||||
|
self.log.info(f"Extracted{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")
|
||||||
|
|
||||||
def level_6(self):
|
def level_6(self):
|
||||||
"""Edit RPU Level 6 values"""
|
"""Edit RPU Level 6 values"""
|
||||||
@@ -185,7 +189,7 @@ class Hybrid:
|
|||||||
json.dump(level6, level6_file, indent=3)
|
json.dump(level6, level6_file, indent=3)
|
||||||
|
|
||||||
if not os.path.isfile(config.directories.temp / "RPU_L6.bin"):
|
if not os.path.isfile(config.directories.temp / "RPU_L6.bin"):
|
||||||
self.log.info("+ Editing RPU Level 6 values")
|
with console.status("Editing RPU Level 6 values...", spinner="dots"):
|
||||||
level6 = subprocess.run(
|
level6 = subprocess.run(
|
||||||
[
|
[
|
||||||
str(DoviTool),
|
str(DoviTool),
|
||||||
@@ -203,7 +207,9 @@ class Hybrid:
|
|||||||
|
|
||||||
if level6.returncode:
|
if level6.returncode:
|
||||||
Path.unlink(config.directories.temp / "RPU_L6.bin")
|
Path.unlink(config.directories.temp / "RPU_L6.bin")
|
||||||
self.log.exit("x Failed editing RPU Level 6 values")
|
raise ValueError("Failed editing RPU Level 6 values")
|
||||||
|
|
||||||
|
self.log.info("Edited RPU Level 6 values")
|
||||||
|
|
||||||
# Update rpu_file to use the edited version
|
# Update rpu_file to use the edited version
|
||||||
self.rpu_file = "RPU_L6.bin"
|
self.rpu_file = "RPU_L6.bin"
|
||||||
@@ -212,8 +218,7 @@ class Hybrid:
|
|||||||
if os.path.isfile(config.directories.temp / self.hevc_file):
|
if os.path.isfile(config.directories.temp / self.hevc_file):
|
||||||
return
|
return
|
||||||
|
|
||||||
self.log.info(f"+ Injecting Dolby Vision metadata into {self.hdr_type} stream")
|
with console.status(f"Injecting Dolby Vision metadata into {self.hdr_type} stream...", spinner="dots"):
|
||||||
|
|
||||||
inject_cmd = [
|
inject_cmd = [
|
||||||
str(DoviTool),
|
str(DoviTool),
|
||||||
"inject-rpu",
|
"inject-rpu",
|
||||||
@@ -239,7 +244,9 @@ class Hybrid:
|
|||||||
|
|
||||||
if inject.returncode:
|
if inject.returncode:
|
||||||
Path.unlink(config.directories.temp / self.hevc_file)
|
Path.unlink(config.directories.temp / self.hevc_file)
|
||||||
self.log.exit("x Failed injecting Dolby Vision metadata into HDR10 stream")
|
raise ValueError("Failed injecting Dolby Vision metadata into HDR10 stream")
|
||||||
|
|
||||||
|
self.log.info(f"Injected Dolby Vision metadata into {self.hdr_type} stream")
|
||||||
|
|
||||||
def extract_hdr10plus(self, _video):
|
def extract_hdr10plus(self, _video):
|
||||||
"""Extract HDR10+ metadata from the video stream"""
|
"""Extract HDR10+ metadata from the video stream"""
|
||||||
```diff
@@ -247,10 +254,9 @@ class Hybrid:
             return

         if not HDR10PlusTool:
-            self.log.exit("x HDR10Plus_tool not found. Please install it to use HDR10+ to DV conversion.")
+            raise ValueError("HDR10Plus_tool not found. Please install it to use HDR10+ to DV conversion.")

-        self.log.info("+ Extracting HDR10+ metadata")
-
+        with console.status("Extracting HDR10+ metadata...", spinner="dots"):
             # HDR10Plus_tool needs raw HEVC stream
             extraction = subprocess.run(
                 [
@@ -265,19 +271,20 @@ class Hybrid:
             )

             if extraction.returncode:
-                self.log.exit("x Failed extracting HDR10+ metadata")
+                raise ValueError("Failed extracting HDR10+ metadata")

             # Check if the extracted file has content
             if os.path.getsize(config.directories.temp / self.hdr10plus_file) == 0:
-                self.log.exit("x No HDR10+ metadata found in the stream")
+                raise ValueError("No HDR10+ metadata found in the stream")

+        self.log.info("Extracted HDR10+ metadata")

     def convert_hdr10plus_to_dv(self):
         """Convert HDR10+ metadata to Dolby Vision RPU"""
         if os.path.isfile(config.directories.temp / "RPU.bin"):
             return

-        self.log.info("+ Converting HDR10+ metadata to Dolby Vision")
+        with console.status("Converting HDR10+ metadata to Dolby Vision...", spinner="dots"):

             # First create the extra metadata JSON for dovi_tool
             extra_metadata = {
                 "cm_version": "V29",
@@ -310,8 +317,9 @@ class Hybrid:
             )

             if conversion.returncode:
-                self.log.exit("x Failed converting HDR10+ to Dolby Vision")
+                raise ValueError("Failed converting HDR10+ to Dolby Vision")

+        self.log.info("Converted HDR10+ metadata to Dolby Vision")
         self.log.info("✓ HDR10+ successfully converted to Dolby Vision Profile 8")

         # Clean up temporary files
```
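Together, `extract_hdr10plus` and `convert_hdr10plus_to_dv` wrap two external CLIs. The equivalent standalone pipeline looks roughly like this (flag spellings per the public hdr10plus_tool and dovi_tool interfaces; file names are placeholders):

```python
import subprocess

# 1. Pull HDR10+ dynamic metadata out of the raw HEVC stream.
subprocess.run(["hdr10plus_tool", "extract", "video.hevc", "-o", "hdr10plus.json"], check=True)

# 2. Generate a Dolby Vision RPU from that metadata plus the extra-metadata
#    JSON built above (serialized here as "extra.json").
subprocess.run(
    ["dovi_tool", "generate", "-j", "extra.json", "--hdr10plus-json", "hdr10plus.json", "-o", "RPU.bin"],
    check=True,
)
```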
```diff
@@ -233,6 +233,7 @@ class Subtitle(Track):
         try:
             caption_set = pycaption.WebVTTReader().read(text)
             Subtitle.merge_same_cues(caption_set)
+            Subtitle.filter_unwanted_cues(caption_set)
             subtitle_text = pycaption.WebVTTWriter().write(caption_set)
             self.path.write_text(subtitle_text, encoding="utf8")
         except pycaption.exceptions.CaptionReadSyntaxError:
@@ -241,6 +242,7 @@ class Subtitle(Track):
         try:
             caption_set = pycaption.WebVTTReader().read(text)
             Subtitle.merge_same_cues(caption_set)
+            Subtitle.filter_unwanted_cues(caption_set)
             subtitle_text = pycaption.WebVTTWriter().write(caption_set)
             self.path.write_text(subtitle_text, encoding="utf8")
         except Exception:
@@ -444,6 +446,8 @@ class Subtitle(Track):

         caption_set = self.parse(self.path.read_bytes(), self.codec)
         Subtitle.merge_same_cues(caption_set)
+        if codec == Subtitle.Codec.WebVTT:
+            Subtitle.filter_unwanted_cues(caption_set)
         subtitle_text = writer().write(caption_set)

         output_path.write_text(subtitle_text, encoding="utf8")
@@ -520,6 +524,8 @@ class Subtitle(Track):

         caption_set = self.parse(self.path.read_bytes(), self.codec)
         Subtitle.merge_same_cues(caption_set)
+        if codec == Subtitle.Codec.WebVTT:
+            Subtitle.filter_unwanted_cues(caption_set)
         subtitle_text = writer().write(caption_set)

         output_path.write_text(subtitle_text, encoding="utf8")
@@ -681,6 +687,24 @@ class Subtitle(Track):
         if merged_captions:
             caption_set.set_captions(lang, merged_captions)

+    @staticmethod
+    def filter_unwanted_cues(caption_set: pycaption.CaptionSet):
+        """
+        Filter out subtitle cues containing only &nbsp; or whitespace.
+        """
+        for lang in caption_set.get_languages():
+            captions = caption_set.get_captions(lang)
+            filtered_captions = pycaption.CaptionList()
+
+            for caption in captions:
+                text = caption.get_text().strip()
+                if not text or text == "&nbsp;" or all(c in " \t\n\r\xa0" for c in text.replace("&nbsp;", "\xa0")):
+                    continue
+
+                filtered_captions.append(caption)
+
+            caption_set.set_captions(lang, filtered_captions)
+
     @staticmethod
     def merge_segmented_wvtt(data: bytes, period_start: float = 0.0) -> tuple[CaptionList, Optional[str]]:
         """
```
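The filter's one subtlety is treating the literal text "&nbsp;" (common in scraped WebVTT) the same as a real non-breaking space. A sketch of that predicate in isolation:

```python
# The entity-normalization test from filter_unwanted_cues, standalone:
def is_unwanted(text: str) -> bool:
    text = text.strip()
    if not text or text == "&nbsp;":
        return True
    # Treat literal "&nbsp;" entities as non-breaking spaces, then test
    # whether anything other than whitespace remains.
    return all(c in " \t\n\r\xa0" for c in text.replace("&nbsp;", "\xa0"))

assert is_unwanted("&nbsp;")              # entity-only cue
assert is_unwanted("&nbsp; &nbsp;")       # entities mixed with whitespace
assert is_unwanted("\xa0\xa0")            # real non-breaking spaces
assert not is_unwanted("Actual dialogue")
```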
```diff
@@ -355,6 +355,14 @@ class Tracks:
                 ]
             )

+            if hasattr(vt, "range") and vt.range == Video.Range.HLG:
+                video_args.extend(
+                    [
+                        "--color-transfer-characteristics",
+                        "0:18",  # ARIB STD-B67 (HLG)
+                    ]
+                )
+
             cl.extend(video_args + ["(", str(vt.path), ")"])

         for i, at in enumerate(self.audio):
```
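On the command line, the added arguments are equivalent to tagging the video track's transfer characteristics explicitly when muxing (paths illustrative):

```python
import subprocess

# "0:18" sets the transfer characteristics of track 0 to 18 = ARIB STD-B67 (HLG),
# instead of letting mkvmerge rewrite it as 14 (BT.2020).
subprocess.run(
    [
        "mkvmerge",
        "--output", "movie.hlg.mkv",
        "--color-transfer-characteristics", "0:18",
        "(", "video.hevc", ")",
        "(", "audio.mka", ")",
    ],
    check=True,
)
```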
```diff
@@ -116,6 +116,7 @@ class Video(Track):
     class Transfer(Enum):
         Unspecified = 0
         BT_709 = 1
+        Unspecified_Image = 2
         BT_601 = 6
         BT_2020 = 14
         BT_2100 = 15
```
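Transfer characteristic 2 means "unspecified" in H.273 and does appear in real streams; without a matching member, constructing the enum from a probed value raises. A minimal illustration:

```python
from enum import Enum

class Transfer(Enum):
    Unspecified = 0
    BT_709 = 1
    Unspecified_Image = 2  # the newly added member

assert Transfer(2) is Transfer.Unspecified_Image
# Before the addition, Transfer(2) raised: ValueError: 2 is not a valid Transfer
```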
```diff
@@ -1,16 +1,165 @@
 from __future__ import annotations

 import asyncio
+import json
+import time
+from pathlib import Path
 from typing import Optional

 import requests


 class UpdateChecker:
-    """Check for available updates from the GitHub repository."""
+    """
+    Check for available updates from the GitHub repository.
+
+    This class provides functionality to check for newer versions of the application
+    by querying the GitHub releases API. It includes rate limiting, caching, and
+    both synchronous and asynchronous interfaces.
+
+    Attributes:
+        REPO_URL: GitHub API URL for latest release
+        TIMEOUT: Request timeout in seconds
+        DEFAULT_CHECK_INTERVAL: Default time between checks in seconds (24 hours)
+    """

     REPO_URL = "https://api.github.com/repos/unshackle-dl/unshackle/releases/latest"
     TIMEOUT = 5
+    DEFAULT_CHECK_INTERVAL = 24 * 60 * 60
+
+    @classmethod
+    def _get_cache_file(cls) -> Path:
+        """Get the path to the update check cache file."""
+        from unshackle.core.config import config
+
+        return config.directories.cache / "update_check.json"
+
+    @classmethod
+    def _load_cache_data(cls) -> dict:
+        """
+        Load cache data from file.
+
+        Returns:
+            Cache data dictionary or empty dict if loading fails
+        """
+        cache_file = cls._get_cache_file()
+
+        if not cache_file.exists():
+            return {}
+
+        try:
+            with open(cache_file, "r") as f:
+                return json.load(f)
+        except (json.JSONDecodeError, OSError):
+            return {}
+
+    @staticmethod
+    def _parse_version(version_string: str) -> str:
+        """
+        Parse and normalize version string by removing 'v' prefix.
+
+        Args:
+            version_string: Raw version string from API
+
+        Returns:
+            Cleaned version string
+        """
+        return version_string.lstrip("v")
+
+    @staticmethod
+    def _is_valid_version(version: str) -> bool:
+        """
+        Validate version string format.
+
+        Args:
+            version: Version string to validate
+
+        Returns:
+            True if version string is valid semantic version, False otherwise
+        """
+        if not version or not isinstance(version, str):
+            return False
+
+        try:
+            parts = version.split(".")
+            if len(parts) < 2:
+                return False
+
+            for part in parts:
+                int(part)
+
+            return True
+        except (ValueError, AttributeError):
+            return False
+
+    @classmethod
+    def _fetch_latest_version(cls) -> Optional[str]:
+        """
+        Fetch the latest version from GitHub API.
+
+        Returns:
+            Latest version string if successful, None otherwise
+        """
+        try:
+            response = requests.get(cls.REPO_URL, timeout=cls.TIMEOUT)
+
+            if response.status_code != 200:
+                return None
+
+            data = response.json()
+            latest_version = cls._parse_version(data.get("tag_name", ""))
+
+            return latest_version if cls._is_valid_version(latest_version) else None
+
+        except Exception:
+            return None
+
+    @classmethod
+    def _should_check_for_updates(cls, check_interval: int = DEFAULT_CHECK_INTERVAL) -> bool:
+        """
+        Check if enough time has passed since the last update check.
+
+        Args:
+            check_interval: Time in seconds between checks (default: 24 hours)
+
+        Returns:
+            True if we should check for updates, False otherwise
+        """
+        cache_data = cls._load_cache_data()
+
+        if not cache_data:
+            return True
+
+        last_check = cache_data.get("last_check", 0)
+        current_time = time.time()
+
+        return (current_time - last_check) >= check_interval
+
+    @classmethod
+    def _update_cache(cls, latest_version: Optional[str] = None, current_version: Optional[str] = None) -> None:
+        """
+        Update the cache file with the current timestamp and version info.
+
+        Args:
+            latest_version: The latest version found, if any
+            current_version: The current version being used
+        """
+        cache_file = cls._get_cache_file()
+
+        try:
+            cache_file.parent.mkdir(parents=True, exist_ok=True)
+
+            cache_data = {
+                "last_check": time.time(),
+                "latest_version": latest_version,
+                "current_version": current_version,
+            }
+
+            with open(cache_file, "w") as f:
+                json.dump(cache_data, f, indent=2)
+
+        except (OSError, json.JSONEncodeError):
+            pass
+
     @staticmethod
     def _compare_versions(current: str, latest: str) -> bool:
```
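Concretely, the validator and normalizer behave like this (worked examples against the logic above):

```python
assert UpdateChecker._parse_version("v1.4.0") == "1.4.0"   # strips the tag prefix
assert UpdateChecker._is_valid_version("1.4.0")            # numeric dotted parts
assert UpdateChecker._is_valid_version("1.4")              # two parts suffice
assert not UpdateChecker._is_valid_version("1")            # fewer than two parts
assert not UpdateChecker._is_valid_version("v1.4.0")       # prefix must be stripped first
```

One caveat worth noting: the stdlib `json` module defines no `JSONEncodeError` (serialization failures raise `TypeError`), so if writing the cache ever fails, evaluating that except clause would itself raise `AttributeError`; `(OSError, TypeError)` would be the conventional guard.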
```diff
@@ -24,6 +173,9 @@ class UpdateChecker:
         Returns:
             True if latest > current, False otherwise
         """
+        if not UpdateChecker._is_valid_version(current) or not UpdateChecker._is_valid_version(latest):
+            return False
+
         try:
             current_parts = [int(x) for x in current.split(".")]
             latest_parts = [int(x) for x in latest.split(".")]
```
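The comparison is numeric per dotted field, which is what makes 1.10.0 sort above 1.9.0 (plain string comparison would get this wrong). A minimal sketch of the field-wise rule, assuming missing fields count as zero:

```python
def newer(current: str, latest: str) -> bool:
    # Numeric, field-by-field comparison; pad the shorter version with zeros.
    c = [int(x) for x in current.split(".")]
    n = [int(x) for x in latest.split(".")]
    width = max(len(c), len(n))
    c += [0] * (width - len(c))
    n += [0] * (width - len(n))
    return n > c  # list comparison is lexicographic over the integer fields

assert newer("1.9.0", "1.10.0")    # 10 > 9 numerically
assert not newer("1.10.0", "1.9.0")
assert not newer("1.4", "1.4.0")   # equal once padded
```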
```diff
@@ -53,20 +205,14 @@ class UpdateChecker:
         Returns:
             The latest version string if an update is available, None otherwise
         """
+        if not cls._is_valid_version(current_version):
+            return None
+
         try:
             loop = asyncio.get_event_loop()
-            response = await loop.run_in_executor(None, lambda: requests.get(cls.REPO_URL, timeout=cls.TIMEOUT))
+            latest_version = await loop.run_in_executor(None, cls._fetch_latest_version)

-            if response.status_code != 200:
-                return None
-
-            data = response.json()
-            latest_version = data.get("tag_name", "").lstrip("v")
-
-            if not latest_version:
-                return None
-
-            if cls._compare_versions(current_version, latest_version):
+            if latest_version and cls._compare_versions(current_version, latest_version):
                 return latest_version

         except Exception:
```
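`run_in_executor` is what keeps the blocking `requests` call off the event loop; with the HTTP work now in `_fetch_latest_version`, the executor call shrinks to passing that bound method. On Python 3.9+ the same pattern can be written with `asyncio.to_thread`, shown here only as an equivalent sketch:

```python
import asyncio

async def check(cls, current_version: str):
    # Offload the synchronous HTTP fetch to a worker thread so the event loop
    # stays responsive; equivalent to loop.run_in_executor(None, cls._fetch_latest_version).
    latest = await asyncio.to_thread(cls._fetch_latest_version)
    if latest and cls._compare_versions(current_version, latest):
        return latest
    return None
```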
```diff
@@ -75,32 +221,56 @@ class UpdateChecker:
             return None

     @classmethod
-    def check_for_updates_sync(cls, current_version: str) -> Optional[str]:
+    def _get_cached_update_info(cls, current_version: str) -> Optional[str]:
         """
-        Synchronous version of update check.
+        Check if there's a cached update available for the current version.
+
+        Args:
+            current_version: The current version string
+
+        Returns:
+            The latest version string if an update is available from cache, None otherwise
+        """
+        cache_data = cls._load_cache_data()
+
+        if not cache_data:
+            return None
+
+        cached_current = cache_data.get("current_version")
+        cached_latest = cache_data.get("latest_version")
+
+        if cached_current == current_version and cached_latest:
+            if cls._compare_versions(current_version, cached_latest):
+                return cached_latest
+
+        return None
+
+    @classmethod
+    def check_for_updates_sync(cls, current_version: str, check_interval: Optional[int] = None) -> Optional[str]:
+        """
+        Synchronous version of update check with rate limiting.

         Args:
             current_version: The current version string (e.g., "1.1.0")
+            check_interval: Time in seconds between checks (default: from config)

         Returns:
             The latest version string if an update is available, None otherwise
         """
-        try:
-            response = requests.get(cls.REPO_URL, timeout=cls.TIMEOUT)
-
-            if response.status_code != 200:
-                return None
-
-            data = response.json()
-            latest_version = data.get("tag_name", "").lstrip("v")
-
-            if not latest_version:
-                return None
-
-            if cls._compare_versions(current_version, latest_version):
-                return latest_version
-
-        except Exception:
-            pass
+        if not cls._is_valid_version(current_version):
+            return None
+
+        if check_interval is None:
+            from unshackle.core.config import config
+
+            check_interval = config.update_check_interval * 60 * 60
+
+        if not cls._should_check_for_updates(check_interval):
+            return cls._get_cached_update_info(current_version)
+
+        latest_version = cls._fetch_latest_version()
+        cls._update_cache(latest_version, current_version)
+        if latest_version and cls._compare_versions(current_version, latest_version):
+            return latest_version

         return None
```
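End to end, the rate-limited flow behaves like this (a usage sketch; `update_check_interval` is read from the YAML config in hours and converted to seconds):

```python
# First call within the interval window: hits the GitHub API, writes
# update_check.json, and returns e.g. "1.5.0" if that release is newer.
newer = UpdateChecker.check_for_updates_sync("1.4.0")

# Subsequent calls inside the interval never touch the network; they answer
# from the cached latest_version/current_version pair instead.
cached = UpdateChecker.check_for_updates_sync("1.4.0")

# Passing an explicit interval (in seconds) overrides the config value;
# zero forces a fresh check.
forced = UpdateChecker.check_for_updates_sync("1.4.0", check_interval=0)
```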
```diff
@@ -33,6 +33,7 @@ class EXAMPLE(Service):

    TITLE_RE = r"^(?:https?://?domain\.com/details/)?(?P<title_id>[^/]+)"
    GEOFENCE = ("US", "UK")
+   NO_SUBTITLES = True

    @staticmethod
    @click.command(name="EXAMPLE", short_help="https://domain.com")
```
```diff
@@ -4,17 +4,40 @@ tag: user_tag
 # Set terminal background color (custom option not in CONFIG.md)
 set_terminal_bg: false

+# Set file naming convention
+# true for style - Prime.Suspect.S07E01.The.Final.Act.Part.One.1080p.ITV.WEB-DL.AAC2.0.H.264
+# false for style - Prime Suspect S07E01 The Final Act - Part One
+scene_naming: true
+
 # Check for updates from GitHub repository on startup (default: true)
 update_checks: true

+# How often to check for updates, in hours (default: 24)
+update_check_interval: 24
+
 # Muxing configuration
 muxing:
   set_title: false

 # Login credentials for each Service
 credentials:
+  # Direct credentials (no profile support)
   EXAMPLE: email@example.com:password
-  EXAMPLE2: username:password
+
+  # Per-profile credentials with default fallback
+  SERVICE_NAME:
+    default: default@email.com:password # Used when no -p/--profile is specified
+    profile1: user1@email.com:password1
+    profile2: user2@email.com:password2
+
+  # Per-profile credentials without default (requires -p/--profile)
+  SERVICE_NAME2:
+    john: john@example.com:johnspassword
+    jane: jane@example.com:janespassword
+
+  # You can also use list format for passwords with special characters
+  SERVICE_NAME3:
+    default: ["user@email.com", ":PasswordWith:Colons"]

 # Override default directories used across unshackle
 directories:
@@ -36,8 +59,17 @@ directories:

 # Pre-define which Widevine or PlayReady device to use for each Service
 cdm:
+  # Global default CDM device (fallback for all services/profiles)
   default: WVD_1
-  EXAMPLE: PRD_1
+
+  # Direct service-specific CDM
+  DIFFERENT_EXAMPLE: PRD_1
+
+  # Per-profile CDM configuration
+  EXAMPLE:
+    john_sd: chromecdm_903_l3 # Profile 'john_sd' uses Chrome CDM L3
+    jane_uhd: nexus_5_l1 # Profile 'jane_uhd' uses Nexus 5 L1
+    default: generic_android_l3 # Default CDM for this service

 # Use pywidevine Serve-compliant Remote CDMs
 remote_cdm:
@@ -154,20 +186,45 @@ serve:
 # Configuration data for each Service
 services:
   # Service-specific configuration goes here
-  # EXAMPLE:
-  #   api_key: "service_specific_key"
+  # Profile-specific configurations can be nested under service names
+
+  # Example: with profile-specific device configs
+  EXAMPLE:
+    # Global service config
+    api_key: "service_api_key"
+
+    # Profile-specific device configurations
+    profiles:
+      john_sd:
+        device:
+          app_name: "AIV"
+          device_model: "SHIELD Android TV"
+      jane_uhd:
+        device:
+          app_name: "AIV"
+          device_model: "Fire TV Stick 4K"
+
+  # Example: Service with different regions per profile
+  SERVICE_NAME:
+    profiles:
+      us_account:
+        region: "US"
+        api_endpoint: "https://api.us.service.com"
+      uk_account:
+        region: "GB"
+        api_endpoint: "https://api.uk.service.com"

 # External proxy provider services
 proxy_providers:
   nordvpn:
     username: username_from_service_credentials
     password: password_from_service_credentials
-    servers:
+    server_map:
       - us: 12 # force US server #12 for US proxies
   surfsharkvpn:
     username: your_surfshark_service_username # Service credentials from https://my.surfshark.com/vpn/manual-setup/main/openvpn
     password: your_surfshark_service_password # Service credentials (not your login password)
-    servers:
+    server_map:
       - us: 3844 # force US server #3844 for US proxies
       - gb: 2697 # force GB server #2697 for GB proxies
       - au: 4621 # force AU server #4621 for AU proxies
```
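The profile semantics the credential examples above imply can be sketched as a resolution function (a hypothetical helper for illustration, not unshackle's actual lookup code):

```python
def resolve_credential(service_entry, profile=None):
    # Flat "user:pass" string: no profile support, always used as-is.
    if isinstance(service_entry, str):
        return service_entry
    # Mapping of profiles: prefer the requested profile, else the "default" key.
    if profile and profile in service_entry:
        return service_entry[profile]
    return service_entry.get("default")  # None if the service requires -p/--profile

creds = {"default": "default@email.com:password", "john": "john@example.com:johnspassword"}
assert resolve_credential(creds, "john") == "john@example.com:johnspassword"
assert resolve_credential(creds) == "default@email.com:password"
```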
```diff
@@ -30,7 +30,7 @@ class HTTP(Vault):
             api_mode: "query" for query parameters or "json" for JSON API
         """
         super().__init__(name)
-        self.url = host.rstrip("/")
+        self.url = host
         self.password = password
         self.username = username
         self.api_mode = api_mode.lower()
@@ -88,21 +88,23 @@ class HTTP(Vault):

         if self.api_mode == "json":
             try:
-                title = getattr(self, "current_title", None)
-                response = self.request(
-                    "GetKey",
-                    {
-                        "kid": kid,
-                        "service": service.lower(),
-                        "title": title,
-                    },
-                )
+                params = {
+                    "kid": kid,
+                    "service": service.lower(),
+                }
+                response = self.request("GetKey", params)
                 if response.get("status") == "not_found":
                     return None
                 keys = response.get("keys", [])
                 for key_entry in keys:
-                    if key_entry["kid"] == kid:
-                        return key_entry["key"]
+                    if isinstance(key_entry, str) and ":" in key_entry:
+                        entry_kid, entry_key = key_entry.split(":", 1)
+                        if entry_kid == kid:
+                            return entry_key
+                    elif isinstance(key_entry, dict):
+                        if key_entry.get("kid") == kid:
+                            return key_entry.get("key")
             except Exception as e:
                 print(f"Failed to get key ({e.__class__.__name__}: {e})")
                 return None
```
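The loop now tolerates both response shapes a vault server might return for `keys`: flat `"kid:key"` strings and `{"kid": ..., "key": ...}` objects. A standalone sketch of the same matching (KID/key values are dummy hex for illustration):

```python
def find_key(keys, kid):
    # Accept both "kid:key" strings and {"kid": ..., "key": ...} dicts.
    for entry in keys:
        if isinstance(entry, str) and ":" in entry:
            entry_kid, entry_key = entry.split(":", 1)
            if entry_kid == kid:
                return entry_key
        elif isinstance(entry, dict) and entry.get("kid") == kid:
            return entry.get("key")
    return None

keys = [
    "0123456789abcdef0123456789abcdef:aaaabbbbccccddddeeeeffff00001111",
    {"kid": "fedcba9876543210fedcba9876543210", "key": "11110000ffffeeeeddddccccbbbbaaaa"},
]
assert find_key(keys, "fedcba9876543210fedcba9876543210") == "11110000ffffeeeeddddccccbbbbaaaa"
```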