mirror of https://github.com/unshackle-dl/unshackle.git
synced 2025-10-23 15:11:08 +00:00
Merge branch 'main' into feature/add-rest-api
39 CHANGELOG.md
@@ -5,6 +5,45 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.4.8] - 2025-10-08

### Added

- **Exact Language Matching**: New `--exact-lang` flag for precise language matching
  - Enables strict language code matching without fallbacks
- **No-Mux Flag**: New `--no-mux` flag to skip muxing tracks into container files
  - Useful for keeping individual track files separate
- **DecryptLabs API Integration for HTTP Vault**: Enhanced vault support
  - Added DecryptLabs API support to HTTP vault for improved key retrieval
- **AC4 Audio Codec Support**: Enhanced audio format handling
  - Added AC4 codec support in Audio class with updated mime/profile handling
- **pysubs2 Subtitle Conversion**: Extended subtitle format support
  - Added pysubs2 subtitle conversion with extended format support
  - Configurable conversion method in configuration

### Changed

- **Audio Track Sorting**: Optimized audio track selection logic
  - Improved audio track sorting by grouping descriptive tracks and sorting by bitrate
  - Better identification of ATMOS and DD+ as highest quality for filenaming
- **pyplayready Update**: Upgraded to version 0.6.3
  - Updated import paths to resolve compatibility issues
  - Fixed lxml constraints for better dependency management
- **pysubs2 Conversion Method**: Moved from auto to manual configuration
  - pysubs2 no longer auto-selected during testing phase

### Fixed

- **Remote CDM**: Fixed curl_cffi compatibility
  - Added curl_cffi to instance checks in RemoteCDM
- **Temporary File Handling**: Improved encoding handling
  - Specified UTF-8 encoding when opening temporary files

### Reverted

- **tinycss SyntaxWarning Suppression**: Removed ineffective warning filter
  - Reverted warnings filter that didn't work as expected for suppressing tinycss warnings

## [1.4.7] - 2025-09-25

### Added

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "unshackle"
version = "1.4.6"
version = "1.4.8"
description = "Modular Movie, TV, and Music Archival Software."
authors = [{ name = "unshackle team" }]
requires-python = ">=3.10,<3.13"

@@ -180,6 +180,12 @@ class dl:
        help="Required subtitle languages. Downloads all subtitles only if these languages exist. Cannot be used with --s-lang.",
    )
    @click.option("-fs", "--forced-subs", is_flag=True, default=False, help="Include forced subtitle tracks.")
    @click.option(
        "--exact-lang",
        is_flag=True,
        default=False,
        help="Use exact language matching (no variants). With this flag, -l es-419 matches ONLY es-419, not es-ES or other variants.",
    )
    @click.option(
        "--proxy",
        type=str,
@@ -258,6 +264,7 @@ class dl:
    @click.option(
        "--no-source", is_flag=True, default=False, help="Disable the source tag from the output file name and path."
    )
    @click.option("--no-mux", is_flag=True, default=False, help="Do not mux tracks into a container file.")
    @click.option(
        "--workers",
        type=int,
@@ -467,6 +474,7 @@ class dl:
        s_lang: list[str],
        require_subs: list[str],
        forced_subs: bool,
        exact_lang: bool,
        sub_format: Optional[Subtitle.Codec],
        video_only: bool,
        audio_only: bool,
@@ -484,6 +492,7 @@ class dl:
        no_proxy: bool,
        no_folder: bool,
        no_source: bool,
        no_mux: bool,
        workers: Optional[int],
        downloads: int,
        best_available: bool,
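For orientation, here is a minimal, self-contained sketch of how click flags declared like the `--exact-lang` and `--no-mux` options above arrive in the command body as plain keyword arguments. It is not unshackle's actual `dl` command; the command name and echo output are illustrative only.

```python
# Minimal sketch only: a standalone click command mirroring the new flags.
# It is NOT unshackle's real `dl` command; names and output are illustrative.
import click


@click.command()
@click.option("--exact-lang", is_flag=True, default=False, help="Exact language matching, no variants.")
@click.option("--no-mux", is_flag=True, default=False, help="Keep individual track files, skip muxing.")
def demo(exact_lang: bool, no_mux: bool) -> None:
    # click maps "--exact-lang" / "--no-mux" onto the exact_lang / no_mux parameters.
    click.echo(f"exact_lang={exact_lang} no_mux={no_mux}")


if __name__ == "__main__":
    demo()
```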
@@ -707,7 +716,9 @@ class dl:
            else:
                if language not in processed_video_lang:
                    processed_video_lang.append(language)
            title.tracks.videos = title.tracks.by_language(title.tracks.videos, processed_video_lang)
            title.tracks.videos = title.tracks.by_language(
                title.tracks.videos, processed_video_lang, exact_match=exact_lang
            )
            if not title.tracks.videos:
                self.log.error(f"There's no {processed_video_lang} Video Track...")
                sys.exit(1)
@@ -790,16 +801,20 @@ class dl:
                        f"Required languages found ({', '.join(require_subs)}), downloading all available subtitles"
                    )
                elif s_lang and "all" not in s_lang:
                    from unshackle.core.utilities import is_exact_match

                    match_func = is_exact_match if exact_lang else is_close_match

                    missing_langs = [
                        lang_
                        for lang_ in s_lang
                        if not any(is_close_match(lang_, [sub.language]) for sub in title.tracks.subtitles)
                        if not any(match_func(lang_, [sub.language]) for sub in title.tracks.subtitles)
                    ]
                    if missing_langs:
                        self.log.error(", ".join(missing_langs) + " not found in tracks")
                        sys.exit(1)

                    title.tracks.select_subtitles(lambda x: is_close_match(x.language, s_lang))
                    title.tracks.select_subtitles(lambda x: match_func(x.language, s_lang))
                    if not title.tracks.subtitles:
                        self.log.error(f"There's no {s_lang} Subtitle Track...")
                        sys.exit(1)
@@ -863,7 +878,7 @@ class dl:
                elif "all" not in processed_lang:
                    per_language = 1
                title.tracks.audio = title.tracks.by_language(
                    title.tracks.audio, processed_lang, per_language=per_language
                    title.tracks.audio, processed_lang, per_language=per_language, exact_match=exact_lang
                )
                if not title.tracks.audio:
                    self.log.error(f"There's no {processed_lang} Audio Track, cannot continue...")
@@ -1139,7 +1154,12 @@ class dl:

        muxed_paths = []

        if isinstance(title, (Movie, Episode)):
        if no_mux:
            # Skip muxing, handle individual track files
            for track in title.tracks:
                if track.path and track.path.exists():
                    muxed_paths.append(track.path)
        elif isinstance(title, (Movie, Episode)):
            progress = Progress(
                TextColumn("[progress.description]{task.description}"),
                SpinnerColumn(finished_text=""),
@@ -1258,19 +1278,65 @@ class dl:
                # dont mux
                muxed_paths.append(title.tracks.audio[0].path)

        for muxed_path in muxed_paths:
            media_info = MediaInfo.parse(muxed_path)
        if no_mux:
            # Handle individual track files without muxing
            final_dir = config.directories.downloads
            final_filename = title.get_filename(media_info, show_service=not no_source)

            if not no_folder and isinstance(title, (Episode, Song)):
                final_dir /= title.get_filename(media_info, show_service=not no_source, folder=True)
                # Create folder based on title
                # Use first available track for filename generation
                sample_track = title.tracks.videos[0] if title.tracks.videos else (
                    title.tracks.audio[0] if title.tracks.audio else (
                        title.tracks.subtitles[0] if title.tracks.subtitles else None
                    )
                )
                if sample_track and sample_track.path:
                    media_info = MediaInfo.parse(sample_track.path)
                    final_dir /= title.get_filename(media_info, show_service=not no_source, folder=True)

            final_dir.mkdir(parents=True, exist_ok=True)
            final_path = final_dir / f"{final_filename}{muxed_path.suffix}"

            shutil.move(muxed_path, final_path)
            tags.tag_file(final_path, title, self.tmdb_id)
            for track_path in muxed_paths:
                # Generate appropriate filename for each track
                media_info = MediaInfo.parse(track_path)
                base_filename = title.get_filename(media_info, show_service=not no_source)

                # Add track type suffix to filename
                track = next((t for t in title.tracks if t.path == track_path), None)
                if track:
                    if isinstance(track, Video):
                        track_suffix = f".{track.codec.name if hasattr(track.codec, 'name') else 'video'}"
                    elif isinstance(track, Audio):
                        lang_suffix = f".{track.language}" if track.language else ""
                        track_suffix = f"{lang_suffix}.{track.codec.name if hasattr(track.codec, 'name') else 'audio'}"
                    elif isinstance(track, Subtitle):
                        lang_suffix = f".{track.language}" if track.language else ""
                        forced_suffix = ".forced" if track.forced else ""
                        sdh_suffix = ".sdh" if track.sdh else ""
                        track_suffix = f"{lang_suffix}{forced_suffix}{sdh_suffix}"
                    else:
                        track_suffix = ""

                    final_path = final_dir / f"{base_filename}{track_suffix}{track_path.suffix}"
                else:
                    final_path = final_dir / f"{base_filename}{track_path.suffix}"

                shutil.move(track_path, final_path)
                self.log.debug(f"Saved: {final_path.name}")
        else:
            # Handle muxed files
            for muxed_path in muxed_paths:
                media_info = MediaInfo.parse(muxed_path)
                final_dir = config.directories.downloads
                final_filename = title.get_filename(media_info, show_service=not no_source)

                if not no_folder and isinstance(title, (Episode, Song)):
                    final_dir /= title.get_filename(media_info, show_service=not no_source, folder=True)

                final_dir.mkdir(parents=True, exist_ok=True)
                final_path = final_dir / f"{final_filename}{muxed_path.suffix}"

                shutil.move(muxed_path, final_path)
                tags.tag_file(final_path, title, self.tmdb_id)

        title_dl_time = time_elapsed_since(dl_start_time)
        console.print(

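To make the `--no-mux` naming scheme above easier to follow, here is a small sketch that reproduces the suffix logic on stand-in track objects. The classes and codec names are illustrative, not unshackle's real Video/Audio/Subtitle types, and real base filenames come from `title.get_filename()`.

```python
# Illustrative sketch of the per-track suffix scheme used when --no-mux is set.
# These dataclasses are stand-ins, not unshackle's Video/Audio/Subtitle classes.
from dataclasses import dataclass


@dataclass
class FakeVideo:
    codec_name: str = "H264"


@dataclass
class FakeAudio:
    language: str = "en"
    codec_name: str = "AAC"


@dataclass
class FakeSubtitle:
    language: str = "en"
    forced: bool = False
    sdh: bool = True


def track_suffix(track) -> str:
    # Mirrors the branching in the diff: video -> ".CODEC", audio -> ".lang.CODEC",
    # subtitle -> ".lang[.forced][.sdh]"; anything else gets no suffix.
    if isinstance(track, FakeVideo):
        return f".{track.codec_name}"
    if isinstance(track, FakeAudio):
        return f".{track.language}.{track.codec_name}"
    if isinstance(track, FakeSubtitle):
        return f".{track.language}" + (".forced" if track.forced else "") + (".sdh" if track.sdh else "")
    return ""


if __name__ == "__main__":
    base = "Show.S01E01.1080p"  # placeholder for title.get_filename(...)
    print(base + track_suffix(FakeVideo()) + ".mp4")      # Show.S01E01.1080p.H264.mp4
    print(base + track_suffix(FakeAudio()) + ".m4a")      # Show.S01E01.1080p.en.AAC.m4a
    print(base + track_suffix(FakeSubtitle()) + ".srt")   # Show.S01E01.1080p.en.sdh.srt
```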
@@ -1,9 +1,3 @@
import warnings

# Suppress SyntaxWarning from unmaintained tinycss package (dependency of subby)
# Must be set before any imports that might trigger tinycss loading
warnings.filterwarnings("ignore", category=SyntaxWarning, module="tinycss")

import atexit
import logging
from pathlib import Path
@@ -6,6 +6,7 @@ DOWNLOAD_LICENCE_ONLY = Event()

DRM_SORT_MAP = ["ClearKey", "Widevine"]
LANGUAGE_MAX_DISTANCE = 5  # this is max to be considered "same", e.g., en, en-US, en-AU
LANGUAGE_EXACT_DISTANCE = 0  # exact match only, no variants
VIDEO_CODEC_MAP = {"AVC": "H.264", "HEVC": "H.265"}
DYNAMIC_RANGE_MAP = {"HDR10": "HDR", "HDR10+": "HDR10P", "Dolby Vision": "DV", "HDR10 / HDR10+": "HDR10P", "HDR10 / HDR10": "HDR"}
AUDIO_CODEC_MAP = {"E-AC-3": "DDP", "AC-3": "DD"}
@@ -14,7 +14,7 @@ from rich.tree import Tree
from unshackle.core import binaries
from unshackle.core.config import config
from unshackle.core.console import console
from unshackle.core.constants import LANGUAGE_MAX_DISTANCE, AnyTrack, TrackT
from unshackle.core.constants import LANGUAGE_EXACT_DISTANCE, LANGUAGE_MAX_DISTANCE, AnyTrack, TrackT
from unshackle.core.events import events
from unshackle.core.tracks.attachment import Attachment
from unshackle.core.tracks.audio import Audio
@@ -294,11 +294,14 @@ class Tracks:
        self.videos = selected

    @staticmethod
    def by_language(tracks: list[TrackT], languages: list[str], per_language: int = 0) -> list[TrackT]:
    def by_language(
        tracks: list[TrackT], languages: list[str], per_language: int = 0, exact_match: bool = False
    ) -> list[TrackT]:
        distance = LANGUAGE_EXACT_DISTANCE if exact_match else LANGUAGE_MAX_DISTANCE
        selected = []
        for language in languages:
            selected.extend(
                [x for x in tracks if closest_supported_match(x.language, [language], LANGUAGE_MAX_DISTANCE)][
                [x for x in tracks if closest_supported_match(str(x.language), [language], distance)][
                    : per_language or None
                ]
            )
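A quick note on the `[: per_language or None]` idiom in `by_language` above: a `per_language` of 0 becomes `None`, which keeps every match, while any positive value caps the number of tracks kept per language. The following is a minimal sketch with a naive stand-in matcher (not the real `closest_supported_match`) just to show the selection and capping behaviour.

```python
# Toy illustration of the by_language selection/capping pattern; the matcher here
# is a naive stand-in, not unshackle's closest_supported_match().
def toy_match(track_lang: str, wanted: str, exact: bool) -> bool:
    if exact:
        return track_lang.lower() == wanted.lower()
    # loose matching: treat "en" and "en-US" style variants as the same base language
    return track_lang.lower().split("-")[0] == wanted.lower().split("-")[0]


def by_language(tracks: list[str], languages: list[str], per_language: int = 0, exact_match: bool = False) -> list[str]:
    selected = []
    for language in languages:
        matches = [t for t in tracks if toy_match(t, language, exact_match)]
        # per_language == 0 -> [:None] -> keep everything; otherwise keep at most N per language
        selected.extend(matches[: per_language or None])
    return selected


tracks = ["es-419", "es-ES", "en-US"]
print(by_language(tracks, ["es-419"]))                    # ['es-419', 'es-ES'] under loose matching
print(by_language(tracks, ["es-419"], exact_match=True))  # ['es-419'] only
print(by_language(tracks, ["es-419"], per_language=1))    # ['es-419'] (capped to one per language)
```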
@@ -24,7 +24,7 @@ from unidecode import unidecode

from unshackle.core.cacher import Cacher
from unshackle.core.config import config
from unshackle.core.constants import LANGUAGE_MAX_DISTANCE
from unshackle.core.constants import LANGUAGE_EXACT_DISTANCE, LANGUAGE_MAX_DISTANCE


def rotate_log_file(log_path: Path, keep: int = 20) -> Path:
@@ -114,6 +114,14 @@ def is_close_match(language: Union[str, Language], languages: Sequence[Union[str
    return closest_match(language, list(map(str, languages)))[1] <= LANGUAGE_MAX_DISTANCE


def is_exact_match(language: Union[str, Language], languages: Sequence[Union[str, Language, None]]) -> bool:
    """Check if a language is an exact match to any of the provided languages."""
    languages = [x for x in languages if x]
    if not languages:
        return False
    return closest_match(language, list(map(str, languages)))[1] <= LANGUAGE_EXACT_DISTANCE


def get_boxes(data: bytes, box_type: bytes, as_bytes: bool = False) -> Box:
    """
    Scan a byte array for a wanted MP4/ISOBMFF box, then parse and yield each find.
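For context on the two thresholds used above (`LANGUAGE_MAX_DISTANCE = 5` vs `LANGUAGE_EXACT_DISTANCE = 0`), here is a small standalone sketch. It assumes `closest_match` is `langcodes.closest_match`, which fits the `(tag, distance)` usage in the diff, but that assumption is mine and is not stated in the patch; the expected outputs are hedged comments based on the constant's own remark about en / en-US / en-AU.

```python
# Sketch: how a distance threshold turns closest_match into a "close" vs "exact"
# language predicate. Assumes langcodes provides closest_match(); thresholds
# mirror LANGUAGE_MAX_DISTANCE / LANGUAGE_EXACT_DISTANCE from constants.py.
from langcodes import closest_match

LANGUAGE_MAX_DISTANCE = 5
LANGUAGE_EXACT_DISTANCE = 0


def is_close(language: str, candidates: list[str]) -> bool:
    return closest_match(language, candidates)[1] <= LANGUAGE_MAX_DISTANCE


def is_exact(language: str, candidates: list[str]) -> bool:
    return closest_match(language, candidates)[1] <= LANGUAGE_EXACT_DISTANCE


# Per the constant's comment, en / en-US / en-AU count as the "same" language
# under the close threshold, while the exact threshold only accepts distance 0.
print(is_close("en", ["en-US", "en-AU"]))  # expected: True
print(is_exact("en", ["en-US", "en-AU"]))  # expected: False (not an identical tag)
print(is_exact("en-US", ["en-US"]))        # expected: True
```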
@@ -294,7 +294,7 @@ def _apply_tags(path: Path, tags: dict[str, str]) -> None:
    for name, value in tags.items():
        xml_lines.append(f" <Simple><Name>{escape(name)}</Name><String>{escape(value)}</String></Simple>")
    xml_lines.extend([" </Tag>", "</Tags>"])
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False, encoding="utf-8") as f:
        f.write("\n".join(xml_lines))
        tmp_path = Path(f.name)
    try:
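The `encoding="utf-8"` change above matters because `tempfile.NamedTemporaryFile` opened in text mode without an explicit encoding falls back to `locale.getpreferredencoding()`, which is not UTF-8 on every platform (Windows often defaults to a legacy code page). A rough sketch of the safe pattern, with a made-up tag value:

```python
# Sketch: why an explicit encoding matters when writing XML tag values that may
# contain non-ASCII characters. The tag value below is invented for illustration.
import tempfile
from pathlib import Path

xml_lines = [
    "<Tags>",
    " <Tag>",
    " <Simple><Name>TITLE</Name><String>Amélie, Première Partie</String></Simple>",
    " </Tag>",
    "</Tags>",
]

# Without encoding="utf-8", text mode uses locale.getpreferredencoding(); on a
# cp1252 locale this can raise UnicodeEncodeError or mangle the accented characters.
with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False, encoding="utf-8") as f:
    f.write("\n".join(xml_lines))
    tmp_path = Path(f.name)

print(tmp_path.read_text(encoding="utf-8"))
tmp_path.unlink()  # clean up the temporary file
```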
@@ -16,13 +16,21 @@ class InsertResult(Enum):


class HTTP(Vault):
    """Key Vault using HTTP API with support for both query parameters and JSON payloads."""
    """
    Key Vault using HTTP API with support for multiple API modes.

    Supported modes:
    - query: Uses GET requests with query parameters
    - json: Uses POST requests with JSON payloads
    - decrypt_labs: Uses DecryptLabs API format (read-only)
    """

    def __init__(
        self,
        name: str,
        host: str,
        password: str,
        password: Optional[str] = None,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        api_mode: str = "query",
        no_push: bool = False,
@@ -34,13 +42,17 @@ class HTTP(Vault):
            name: Vault name
            host: Host URL
            password: Password for query mode or API token for json mode
            username: Username (required for query mode, ignored for json mode)
            api_mode: "query" for query parameters or "json" for JSON API
            api_key: API key (alternative to password, used for decrypt_labs mode)
            username: Username (required for query mode, ignored for json/decrypt_labs mode)
            api_mode: "query" for query parameters, "json" for JSON API, or "decrypt_labs" for DecryptLabs API
            no_push: If True, this vault will not receive pushed keys
        """
        super().__init__(name, no_push)
        self.url = host
        self.password = password
        self.password = api_key or password
        if not self.password:
            raise ValueError("Either password or api_key is required")

        self.username = username
        self.api_mode = api_mode.lower()
        self.current_title = None
@@ -48,11 +60,15 @@ class HTTP(Vault):
        self.session.headers.update({"User-Agent": f"unshackle v{__version__}"})
        self.api_session_id = None

        if self.api_mode == "decrypt_labs":
            self.session.headers.update({"decrypt-labs-api-key": self.password})
            self.no_push = True

        # Validate configuration based on mode
        if self.api_mode == "query" and not self.username:
            raise ValueError("Username is required for query mode")
        elif self.api_mode not in ["query", "json"]:
            raise ValueError("api_mode must be either 'query' or 'json'")
        elif self.api_mode not in ["query", "json", "decrypt_labs"]:
            raise ValueError("api_mode must be either 'query', 'json', or 'decrypt_labs'")

    def request(self, method: str, params: dict = None) -> dict:
        """Make a request to the JSON API vault."""
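Putting the constructor and validation above together, a DecryptLabs-mode vault appears to be built roughly as below. The host URL and key are placeholders, and the configuration layer that would normally supply these arguments is not shown in this diff.

```python
# Hedged usage sketch based only on the __init__ signature shown above.
# (Importing the HTTP vault class defined in this diff is assumed; the URL and
# API key are placeholders, not real endpoints or credentials.)
vault = HTTP(
    name="decryptlabs",
    host="https://example.invalid/keys",  # placeholder endpoint
    api_key="YOUR_API_KEY",               # sent as the decrypt-labs-api-key header
    api_mode="decrypt_labs",              # read-only: key pushes/inserts are skipped
)

# Query mode, by contrast, still requires a username plus a password:
legacy_vault = HTTP(
    name="myvault",
    host="https://example.invalid/vault",  # placeholder endpoint
    username="user",
    password="pass",
    api_mode="query",
)
```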
@@ -95,7 +111,51 @@ class HTTP(Vault):
        if isinstance(kid, UUID):
            kid = kid.hex

        if self.api_mode == "json":
        if self.api_mode == "decrypt_labs":
            try:
                request_payload = {"service": service.lower(), "kid": kid}

                response = self.session.post(self.url, json=request_payload)

                if not response.ok:
                    return None

                data = response.json()

                if data.get("message") != "success":
                    return None

                cached_keys = data.get("cached_keys")
                if not cached_keys:
                    return None

                if isinstance(cached_keys, str):
                    try:
                        cached_keys = json.loads(cached_keys)
                    except json.JSONDecodeError:
                        return cached_keys

                if isinstance(cached_keys, dict):
                    if cached_keys.get("kid") == kid:
                        return cached_keys.get("key")
                    if kid in cached_keys:
                        return cached_keys[kid]
                elif isinstance(cached_keys, list):
                    for entry in cached_keys:
                        if isinstance(entry, dict):
                            if entry.get("kid") == kid:
                                return entry.get("key")
                        elif isinstance(entry, str) and ":" in entry:
                            entry_kid, entry_key = entry.split(":", 1)
                            if entry_kid == kid:
                                return entry_key

            except Exception as e:
                print(f"Failed to get key from DecryptLabs ({e.__class__.__name__}: {e})")
                return None
            return None

        elif self.api_mode == "json":
            try:
                params = {
                    "kid": kid,
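The DecryptLabs branch above accepts several shapes for `cached_keys`: a raw string, a dict keyed by `kid` (or carrying `kid`/`key` fields), or a list of dicts and `"kid:key"` strings. A compact standalone sketch of that normalization follows; the payloads are invented for illustration, and only the accepted shapes come from the diff, not from any documented DecryptLabs response format.

```python
# Standalone sketch of the cached_keys normalization in the decrypt_labs branch.
# The example payloads are invented; only the accepted shapes come from the diff.
import json
from typing import Optional, Union


def extract_key(cached_keys: Union[str, dict, list], kid: str) -> Optional[str]:
    if isinstance(cached_keys, str):
        try:
            cached_keys = json.loads(cached_keys)
        except json.JSONDecodeError:
            return cached_keys  # opaque string: returned as-is, like the diff does
    if isinstance(cached_keys, dict):
        if cached_keys.get("kid") == kid:
            return cached_keys.get("key")
        if kid in cached_keys:
            return cached_keys[kid]
    elif isinstance(cached_keys, list):
        for entry in cached_keys:
            if isinstance(entry, dict) and entry.get("kid") == kid:
                return entry.get("key")
            if isinstance(entry, str) and ":" in entry:
                entry_kid, entry_key = entry.split(":", 1)
                if entry_kid == kid:
                    return entry_key
    return None


kid = "0123456789abcdef0123456789abcdef"
print(extract_key({"kid": kid, "key": "aa" * 16}, kid))  # dict with kid/key fields
print(extract_key({kid: "bb" * 16}, kid))                # dict keyed by kid
print(extract_key([f"{kid}:" + "cc" * 16], kid))         # list of "kid:key" strings
```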
@@ -132,7 +192,9 @@ class HTTP(Vault):
            return data["keys"][0]["key"]

    def get_keys(self, service: str) -> Iterator[tuple[str, str]]:
        if self.api_mode == "json":
        if self.api_mode == "decrypt_labs":
            return iter([])
        elif self.api_mode == "json":
            # JSON API doesn't support getting all keys, so return empty iterator
            # This will cause the copy command to rely on the API's internal duplicate handling
            return iter([])
@@ -153,6 +215,9 @@ class HTTP(Vault):
        if not key or key.count("0") == len(key):
            raise ValueError("You cannot add a NULL Content Key to a Vault.")

        if self.api_mode == "decrypt_labs":
            return False

        if isinstance(kid, UUID):
            kid = kid.hex

@@ -192,6 +257,9 @@ class HTTP(Vault):
        return data.get("status_code") == 200

    def add_keys(self, service: str, kid_keys: dict[Union[UUID, str], str]) -> int:
        if self.api_mode == "decrypt_labs":
            return 0

        for kid, key in kid_keys.items():
            if not key or key.count("0") == len(key):
                raise ValueError("You cannot add a NULL Content Key to a Vault.")
@@ -243,7 +311,9 @@ class HTTP(Vault):
        return inserted_count

    def get_services(self) -> Iterator[str]:
        if self.api_mode == "json":
        if self.api_mode == "decrypt_labs":
            return iter([])
        elif self.api_mode == "json":
            try:
                response = self.request("GetServices")
                services = response.get("services", [])
@@ -283,6 +353,9 @@ class HTTP(Vault):
        if not key or key.count("0") == len(key):
            raise ValueError("You cannot add a NULL Content Key to a Vault.")

        if self.api_mode == "decrypt_labs":
            return InsertResult.FAILURE

        if isinstance(kid, UUID):
            kid = kid.hex