Mirror of https://github.com/unshackle-dl/unshackle.git
Synced 2025-10-23 15:11:08 +00:00

Compare commits
48 Commits
| SHA1 |
|---|
| 63e9a78b2a |
| a2bfe47993 |
| cf4dc1ce76 |
| 40028c81d7 |
| 06df10cb58 |
| d61bec4a8c |
| 058bb60502 |
| 7583129e8f |
| 4691694d2e |
| a07345a0a2 |
| 091d7335a3 |
| 8c798b95c4 |
| 46c28fe943 |
| 22c9aa195e |
| 776d8f3df0 |
| 67caf71295 |
| 3ed76d199c |
| 4de9251f95 |
| d2fb409ad9 |
| fdff3a1c56 |
| 5d1f2eb458 |
| 3efac3d474 |
| f578904b76 |
| 9f20159605 |
| 4decb0d107 |
| 80c40c8677 |
| 26ef48c889 |
| 5dad2746b1 |
| 24aa4647ed |
| eeb553cb22 |
| 06c96b88a5 |
| e8e376ad51 |
| fbb140ec90 |
| 16a684c77f |
| c97de0c32b |
| c81b7f192e |
| 1b9fbe3401 |
| f69eb691d7 |
| 05ef841282 |
| 454f19a0f7 |
| 4276267455 |
| ab40dc1bf0 |
| ec16e54c10 |
| 20285f4522 |
| eaa5943b8e |
| 4385035b05 |
| cb26ac6fa2 |
| 95674d5739 |
32  .github/ISSUE_TEMPLATE/bug_report.md  (vendored, new file)
@@ -0,0 +1,32 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: Sp5rky

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Run command uv run [...]
2. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. Windows/Unix]
- Version [e.g. 1.0.1]
- Shaka-packager Version [e.g. 2.6.1]
- n_m3u8dl-re Version [e.g. 0.3.0 beta]
- Any additional software, such as subby/ccextractor/aria2c

**Additional context**
Add any other context about the problem here. If you're reporting issues with services not running or working, please try to explain where in your service it breaks, but don't include service code (unless you have rights to do so).
21  .github/ISSUE_TEMPLATE/feature_request.md  (vendored, new file)
@@ -0,0 +1,21 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: Sp5rky

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
Other tools like Devine/VT had this function [...]

**Additional context**
Add any other context or screenshots about the feature request here.
99  .github/workflows/docker.yml  (vendored, deleted)
@@ -1,99 +0,0 @@
name: Build and Publish Docker Image

on:
  push:
    branches: [main, master]
    paths: # run only when the version file changes
      - "unshackle/core/__init__.py"
  pull_request: {} # optional – delete if you don't build on PRs
  workflow_dispatch: {} # manual override

jobs:
  detect-version-change:
    runs-on: ubuntu-latest
    outputs:
      changed: ${{ steps.vdiff.outputs.changed }}
      version: ${{ steps.vdiff.outputs.version }}

    steps:
      - uses: actions/checkout@v4
        with: { fetch-depth: 2 } # we need the previous commit

      - name: Extract & compare version
        id: vdiff
        shell: bash
        run: |
          current=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
          prev=$(git show HEAD^:unshackle/core/__init__.py \
            | grep -oP '__version__ = "\K[^"]+' || echo '')
          echo "version=$current" >>"$GITHUB_OUTPUT"
          echo "changed=$([ "$current" != "$prev" ] && echo true || echo false)" >>"$GITHUB_OUTPUT"
          echo "Current=$current Previous=$prev"

  build-and-push:
    needs: detect-version-change
    if: needs.detect-version-change.outputs.changed == 'true' # only run when the version was bumped
    runs-on: ubuntu-latest
    permissions: { contents: read, packages: write }

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Extract version from __init__.py
        id: version
        run: |
          VERSION=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "major_minor=$(echo $VERSION | cut -d. -f1-2)" >> $GITHUB_OUTPUT
          echo "major=$(echo $VERSION | cut -d. -f1)" >> $GITHUB_OUTPUT
          echo "Extracted version: $VERSION"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Container Registry
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=pr
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest,enable={{is_default_branch}}
            type=raw,value=v${{ steps.version.outputs.version }},enable={{is_default_branch}}
            type=raw,value=${{ steps.version.outputs.version }},enable={{is_default_branch}}
            type=raw,value=${{ steps.version.outputs.major_minor }},enable={{is_default_branch}}
            type=raw,value=${{ steps.version.outputs.major }},enable={{is_default_branch}}

      - name: Show planned tags
        run: |
          echo "Planning to create the following tags:"
          echo "${{ steps.meta.outputs.tags }}"

      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Test Docker image
        if: github.event_name != 'pull_request'
        run: |
          docker run --rm ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest env check
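For reference, the version-bump detection this removed workflow performed can be reproduced locally. A minimal Python sketch under the same assumptions (the regex and file path are taken from the workflow above; `git` must be on PATH and the repository must have a parent commit):

```python
import re
import subprocess
from pathlib import Path

PATTERN = re.compile(r'__version__ = "([^"]+)"')

# Current version, read from the working tree.
current = PATTERN.search(Path("unshackle/core/__init__.py").read_text()).group(1)

# Previous version, read from the parent commit (empty if not found).
prev_src = subprocess.run(
    ["git", "show", "HEAD^:unshackle/core/__init__.py"],
    capture_output=True, text=True,
).stdout
match = PATTERN.search(prev_src)
previous = match.group(1) if match else ""

print(f"version={current} changed={current != previous}")
```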
2  .gitignore  (vendored)
@@ -1,6 +1,7 @@
# unshackle
unshackle.yaml
unshackle.yml
update_check.json
*.mkv
*.mp4
*.exe
@@ -18,7 +19,6 @@ device_cert
device_client_id_blob
device_private_key
device_vmp_blob
binaries/
unshackle/cache/
unshackle/cookies/
unshackle/certs/
90  CHANGELOG.md  (new file)
@@ -0,0 +1,90 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.3.0] - 2025-08-03

### Added

- **mp4decrypt Support**: Alternative DRM decryption method using mp4decrypt from Bento4
  - Added `mp4decrypt` binary detection and support in binaries module
  - New `decryption` configuration option in unshackle.yaml for service-specific decryption methods
  - Enhanced PlayReady and Widevine DRM classes with mp4decrypt decryption support
  - Service-specific decryption mapping allows choosing between `shaka` and `mp4decrypt` per service
  - Improved error handling and progress reporting for mp4decrypt operations

### Changed

- **DRM Decryption Architecture**: Enhanced decryption system with dual method support
  - Updated `dl.py` to handle service-specific decryption method selection
  - Refactored `Config` class to manage decryption method mapping per service
  - Enhanced DRM decrypt methods with `use_mp4decrypt` parameter for method selection

### Fixed

- **Service Track Filtering**: Cleaned up ATVP service to remove unnecessary track filtering
  - Simplified track return logic to pass all tracks to dl.py for centralized filtering
  - Removed unused codec and quality filter parameters from service initialization

## [1.2.0] - 2025-07-30

### Added

- **Update Checker**: Automatic GitHub release version checking on startup
  - Configurable update notifications via `update_checks` setting in unshackle.yaml
  - Non-blocking HTTP requests with 5-second timeout for performance
  - Smart semantic version comparison supporting all version formats (x.y.z, x.y, x)
  - Graceful error handling for network issues and API failures
  - User-friendly update notifications with current → latest version display
  - Direct links to GitHub releases page for easy updates
- **HDR10+ Support**: Enhanced HDR10+ metadata processing for hybrid tracks
  - HDR10+ tool binary support (`hdr10plus_tool`) added to binaries module
  - HDR10+ to Dolby Vision conversion capabilities in hybrid processing
  - Enhanced metadata extraction for HDR10+ content
- **Duration Fix Handling**: Added duration correction for video and hybrid tracks
- **Temporary Directory Management**: Automatic creation of temp directories for attachment downloads

### Changed

- Enhanced configuration system with new `update_checks` boolean option (defaults to true)
- Updated sample unshackle.yaml with update checker configuration documentation
- Improved console styling consistency using `bright_black` for dimmed text
- **Environment Dependency Check**: Complete overhaul with detailed categorization and status summary
  - Organized dependencies by category (Core, HDR, Download, Subtitle, Player, Network)
  - Enhanced status reporting with compact summary display
  - Improved tool requirement tracking and missing dependency alerts
- **Hybrid Track Processing**: Significant improvements to HDR10+ and Dolby Vision handling
  - Enhanced metadata extraction and processing workflows
  - Better integration with HDR processing tools

### Removed

- **Docker Workflow**: Removed the Docker build and publish GitHub Actions workflow in favor of manual builds

## [1.1.0] - 2025-07-29

### Added

- **HDR10+DV Hybrid Processing**: New `-r HYBRID` command for processing HDR10 and Dolby Vision tracks
  - Support for hybrid HDR processing and injection using dovi_tool
  - New hybrid track processing module for seamless HDR10/DV conversion
  - Automatic detection and handling of HDR10 and DV metadata
  - Support for HDR10 and DV tracks in hybrid mode for EXAMPLE service
  - Binary availability check for dovi_tool in hybrid mode operations
  - Enhanced track processing capabilities for HDR content

### Fixed

- Import order issues and missing json import in hybrid processing
- UV installation process and error handling improvements
- Binary search functionality updated to use `binaries.find`

### Changed

- Updated package version from 1.0.2 to 1.1.0
- Enhanced dl.py command processing for hybrid mode support
- Improved core titles (episode/movie) processing for HDR content
- Extended tracks module with hybrid processing capabilities
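The 1.2.0 notes above mention smart semantic version comparison across x.y.z, x.y, and x formats. The actual `UpdateChecker` implementation is not shown in this diff; a minimal sketch of how such a comparison might work (padding short versions with zeros is an assumption):

```python
def is_newer(current: str, latest: str) -> bool:
    """Return True if `latest` is a strictly newer semantic version."""
    def parts(version: str) -> tuple[int, ...]:
        # "1.3" -> (1, 3, 0): pad to three components so x, x.y and
        # x.y.z formats compare consistently.
        numbers = [int(p) for p in version.strip().lstrip("v").split(".")]
        return tuple(numbers + [0] * (3 - len(numbers)))
    return parts(latest) > parts(current)

assert is_newer("1.0.1", "1.3.0")
assert not is_newer("1.3", "1.3.0")  # same version, differently formatted
```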
31  CONFIG.md
@@ -213,6 +213,37 @@ downloader:

The `default` entry is optional. If omitted, `requests` will be used for services not listed.

## decryption (str | dict)

Choose what software to use to decrypt DRM-protected content throughout unshackle where needed.
You may provide a single decryption method globally, or a mapping of service tags to
decryption methods.

Options:

- `shaka` (default) - Shaka Packager - <https://github.com/shaka-project/shaka-packager>
- `mp4decrypt` - mp4decrypt from Bento4 - <https://github.com/axiomatic-systems/Bento4>

Note that Shaka Packager is the traditional method and works with most services. mp4decrypt
is an alternative that may work better with certain services that have specific encryption formats.

Example mapping:

```yaml
decryption:
  ATVP: mp4decrypt
  AMZN: shaka
  default: shaka
```

The `default` entry is optional. If omitted, `shaka` will be used for services not listed.

Simple configuration (single method for all services):

```yaml
decryption: mp4decrypt
```

## filenames (dict)

Override the default filenames used across unshackle.
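A minimal sketch of how this per-service lookup resolves at runtime, mirroring the `decryption_map` handling that appears in the `Config` class changes later in this diff (the parsed-YAML dict below is illustrative):

```python
decryption_cfg = {"ATVP": "mp4decrypt", "AMZN": "shaka", "default": "shaka"}

# Service tags are normalized to uppercase; "default" becomes the fallback.
decryption_map = {k.upper(): v for k, v in decryption_cfg.items()}
default_method = decryption_map.get("DEFAULT", "shaka")

for service in ("ATVP", "AMZN", "NF"):
    method = decryption_map.get(service, default_method)
    print(service, "->", method)  # ATVP -> mp4decrypt, AMZN -> shaka, NF -> shaka
```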
@@ -14,6 +14,7 @@ unshackle is a fork of [Devine](https://github.com/devine-dl/devine/), a powerfu...
- 🎥 **Multi-Media Support** - Movies, TV episodes, and music
- 🛠️ **Built-in Parsers** - DASH/HLS and ISM manifest support
- 🔒 **DRM Support** - Widevine and PlayReady integration
- 🌈 **HDR10+DV Hybrid** - Hybrid Dolby Vision injection via [dovi_tool](https://github.com/quietvoid/dovi_tool)
- 💾 **Flexible Storage** - Local and remote key vaults
- 👥 **Multi-Profile Auth** - Support for cookies and credentials
- 🤖 **Smart Naming** - Automatic P2P-style filename structure

@@ -54,12 +55,11 @@ docker run --rm ghcr.io/unshackle-dl/unshackle:latest env check

# Download content (mount directories for persistent data)
docker run --rm \
  -v "$(pwd)/downloads:/downloads" \
  -v "$(pwd)/unshackle/downloads:/app/downloads" \
  -v "$(pwd)/unshackle/cookies:/app/unshackle/cookies" \
  -v "$(pwd)/unshackle/services:/app/unshackle/services" \
  -v "$(pwd)/unshackle/WVDs:/app/unshackle/WVDs" \
  -v "$(pwd)/unshackle/PRDs:/app/unshackle/PRDs" \
  -v "$(pwd)/temp:/app/temp" \
  -v "$(pwd)/unshackle/unshackle.yaml:/app/unshackle.yaml" \
  ghcr.io/unshackle-dl/unshackle:latest dl SERVICE_NAME CONTENT_ID

@@ -88,7 +88,6 @@ docker run --rm unshackle env check

## Planned Features

- 🌈 **HDR10+DV Hybrid Support** - Allow support for hybrid HDR10+ and Dolby Vision.
- 🖥️ **Web UI Access & Control** - Manage and control unshackle from a modern web interface.
- 🔄 **Sonarr/Radarr Interactivity** - Direct integration for automated personal downloads.
- ⚙️ **Better ISM Support** - Improve on ISM support for multiple services
52  install.bat
@@ -1,47 +1,61 @@
@echo off
echo Installing unshackle dependencies...
setlocal EnableExtensions EnableDelayedExpansion

echo.
echo === Unshackle setup (Windows) ===
echo.

REM Check if UV is already installed
uv --version >nul 2>&1
where uv >nul 2>&1
if %errorlevel% equ 0 (
    echo UV is already installed.
    echo [OK] uv is already installed.
    goto install_deps
)

echo UV not found. Installing UV...
echo.
echo [..] uv not found. Installing...

REM Install UV using the official installer
powershell -Command "irm https://astral.sh/uv/install.ps1 | iex"
powershell -NoProfile -ExecutionPolicy Bypass -Command "irm https://astral.sh/uv/install.ps1 | iex"
if %errorlevel% neq 0 (
    echo Failed to install UV. Please install UV manually from https://docs.astral.sh/uv/getting-started/installation/
    echo [ERR] Failed to install uv.
    echo PowerShell may be blocking scripts. Try:
    echo     Set-ExecutionPolicy RemoteSigned -Scope CurrentUser
    echo or install manually: https://docs.astral.sh/uv/getting-started/installation/
    pause
    exit /b 1
)

REM Add UV to PATH for current session
set "PATH=%USERPROFILE%\.cargo\bin;%PATH%"
set "UV_BIN="
for %%D in ("%USERPROFILE%\.local\bin" "%LOCALAPPDATA%\Programs\uv\bin" "%USERPROFILE%\.cargo\bin") do (
    if exist "%%~fD\uv.exe" set "UV_BIN=%%~fD"
)

echo UV installed successfully.
echo.
if not defined UV_BIN (
    echo [WARN] Could not locate uv.exe. You may need to reopen your terminal.
) else (
    set "PATH=%UV_BIN%;%PATH%"
)

:: Verify
uv --version >nul 2>&1
if %errorlevel% neq 0 (
    echo [ERR] uv still not reachable in this shell. Open a new terminal and re-run this script.
    pause
    exit /b 1
)
echo [OK] uv installed and reachable.

:install_deps
echo Installing project dependencies in editable mode with dev dependencies...
echo.

REM Install the project in editable mode with dev dependencies
uv sync
if %errorlevel% neq 0 (
    echo Failed to install dependencies. Please check the error messages above.
    echo [ERR] Dependency install failed. See errors above.
    pause
    exit /b 1
)

echo.
echo Installation completed successfully!
echo.
echo You can now run unshackle using:
echo Try:
echo     uv run unshackle --help
echo.
pause
endlocal
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

[project]
name = "unshackle"
version = "1.0.1"
version = "1.3.0"
description = "Modular Movie, TV, and Music Archival Software."
authors = [{ name = "unshackle team" }]
requires-python = ">=3.10,<3.13"

@@ -57,6 +57,7 @@ dependencies = [
    "pyplayready>=0.6.0,<0.7",
    "httpx>=0.28.1,<0.29",
    "cryptography>=45.0.0",
    "subby",
]

[project.urls]

@@ -112,3 +113,4 @@ no_implicit_optional = true

[tool.uv.sources]
unshackle = { workspace = true }
subby = { git = "https://github.com/vevv/subby.git" }
0  unshackle/binaries/placehere.txt  (new file)
@@ -65,7 +65,7 @@ def cfg(ctx: click.Context, key: str, value: str, unset: bool, list_: bool) -> N...

    if not is_write and not is_delete:
        data = data.mlget(key_items, default=KeyError)
        if data == KeyError:
        if data is KeyError:
            raise click.ClickException(f"Key '{key}' does not exist in the config.")
        yaml.dump(data, sys.stdout)
    else:
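The `==` to `is` change above is the classic sentinel-comparison fix; a small illustration of why identity is the right check here (the dict is hypothetical):

```python
MISSING = KeyError  # the class object itself is used as a sentinel

value = {"a": 1}.get("b", MISSING)

# `is` checks identity, so it can never be fooled by a stored value whose
# __eq__ happens to compare equal to the sentinel; `==` could be.
if value is MISSING:
    print("key not found")
```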
@@ -48,13 +48,14 @@ from unshackle.core.constants import DOWNLOAD_LICENCE_ONLY, AnyTrack, context_se...
from unshackle.core.credential import Credential
from unshackle.core.drm import DRM_T, PlayReady, Widevine
from unshackle.core.events import events
from unshackle.core.proxies import Basic, Hola, NordVPN
from unshackle.core.proxies import Basic, Hola, NordVPN, SurfsharkVPN
from unshackle.core.service import Service
from unshackle.core.services import Services
from unshackle.core.titles import Movie, Movies, Series, Song, Title_T
from unshackle.core.titles.episode import Episode
from unshackle.core.tracks import Audio, Subtitle, Tracks, Video
from unshackle.core.tracks.attachment import Attachment
from unshackle.core.tracks.hybrid import Hybrid
from unshackle.core.utilities import get_system_fonts, is_close_match, time_elapsed_since
from unshackle.core.utils import tags
from unshackle.core.utils.click_types import (LANGUAGE_RANGE, QUALITY_LIST, SEASON_RANGE, ContextData, MultipleChoice,

@@ -294,11 +295,41 @@ class dl:

        with console.status("Loading Key Vaults...", spinner="dots"):
            self.vaults = Vaults(self.service)
            total_vaults = len(config.key_vaults)
            failed_vaults = []

            for vault in config.key_vaults:
                vault_type = vault["type"]
                del vault["type"]
                self.vaults.load(vault_type, **vault)
            self.log.info(f"Loaded {len(self.vaults)} Vaults")
                vault_name = vault.get("name", vault_type)
                vault_copy = vault.copy()
                del vault_copy["type"]

                if vault_type.lower() == "sqlite":
                    try:
                        self.vaults.load_critical(vault_type, **vault_copy)
                        self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")
                    except Exception as e:
                        self.log.error(f"vault failure: {vault_name} ({vault_type}) - {e}")
                        raise
                else:
                    # Other vaults (MySQL, HTTP, API) - soft fail
                    if not self.vaults.load(vault_type, **vault_copy):
                        failed_vaults.append(vault_name)
                        self.log.debug(f"Failed to load vault: {vault_name} ({vault_type})")
                    else:
                        self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")

            loaded_count = len(self.vaults)
            if failed_vaults:
                self.log.warning(f"Failed to load {len(failed_vaults)} vault(s): {', '.join(failed_vaults)}")
            self.log.info(f"Loaded {loaded_count}/{total_vaults} Vaults")

            # Debug: Show detailed vault status
            if loaded_count > 0:
                vault_names = [vault.name for vault in self.vaults]
                self.log.debug(f"Active vaults: {', '.join(vault_names)}")
            else:
                self.log.debug("No vaults are currently active")

        self.proxy_providers = []
        if no_proxy:

@@ -309,6 +340,8 @@ class dl:
            self.proxy_providers.append(Basic(**config.proxy_providers["basic"]))
        if config.proxy_providers.get("nordvpn"):
            self.proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"]))
        if config.proxy_providers.get("surfsharkvpn"):
            self.proxy_providers.append(SurfsharkVPN(**config.proxy_providers["surfsharkvpn"]))
        if binaries.HolaProxy:
            self.proxy_providers.append(Hola())
        for proxy_provider in self.proxy_providers:

@@ -397,6 +430,15 @@ class dl:
        self.tmdb_searched = False
        start_time = time.time()

        # Check if dovi_tool is available when hybrid mode is requested
        if any(r == Video.Range.HYBRID for r in range_):
            from unshackle.core.binaries import DoviTool

            if not DoviTool:
                self.log.error("Unable to run hybrid mode: dovi_tool not detected")
                self.log.error("Please install dovi_tool from https://github.com/quietvoid/dovi_tool")
                sys.exit(1)

        if cdm_only is None:
            vaults_only = None
        else:

@@ -537,6 +579,8 @@ class dl:
            sys.exit(1)

        if range_:
            # Special handling for HYBRID - don't filter, keep all HDR10 and DV tracks
            if Video.Range.HYBRID not in range_:
                title.tracks.select_video(lambda x: x.range in range_)
            missing_ranges = [r for r in range_ if not any(x.range == r for x in title.tracks.videos)]
            for color_range in missing_ranges:

@@ -557,31 +601,53 @@ class dl:
            sys.exit(1)

        if quality:
            title.tracks.by_resolutions(quality)
            missing_resolutions = []
            if any(r == Video.Range.HYBRID for r in range_):
                title.tracks.select_video(title.tracks.select_hybrid(title.tracks.videos, quality))
            else:
                title.tracks.by_resolutions(quality)

            for resolution in quality:
                if any(video.height == resolution for video in title.tracks.videos):
                if any(v.height == resolution for v in title.tracks.videos):
                    continue
                if any(int(video.width * (9 / 16)) == resolution for video in title.tracks.videos):
                if any(int(v.width * 9 / 16) == resolution for v in title.tracks.videos):
                    continue
                missing_resolutions.append(resolution)

            if missing_resolutions:
                res_list = ""
                if len(missing_resolutions) > 1:
                    res_list = (", ".join([f"{x}p" for x in missing_resolutions[:-1]])) + " or "
                    res_list = ", ".join([f"{x}p" for x in missing_resolutions[:-1]]) + " or "
                res_list = f"{res_list}{missing_resolutions[-1]}p"
                plural = "s" if len(missing_resolutions) > 1 else ""
                self.log.error(f"There's no {res_list} Video Track{plural}...")
                sys.exit(1)

        # choose best track by range and quality
        if any(r == Video.Range.HYBRID for r in range_):
            # For hybrid mode, always apply hybrid selection
            # If no quality specified, use only the best (highest) resolution
            if not quality:
                # Get the highest resolution available
                best_resolution = max((v.height for v in title.tracks.videos), default=None)
                if best_resolution:
                    # Use the hybrid selection logic with only the best resolution
                    title.tracks.select_video(
                        title.tracks.select_hybrid(title.tracks.videos, [best_resolution])
                    )
            # If quality was specified, hybrid selection was already applied above
        else:
            selected_videos: list[Video] = []
            for resolution, color_range in product(quality or [None], range_ or [None]):
                match = next(
                    (
                        t
                        for t in title.tracks.videos
                        if (not resolution or t.height == resolution or int(t.width * (9 / 16)) == resolution)
                        if (
                            not resolution
                            or t.height == resolution
                            or int(t.width * (9 / 16)) == resolution
                        )
                        and (not color_range or t.range == color_range)
                    ),
                    None,

@@ -699,7 +765,8 @@ class dl:
        DOWNLOAD_LICENCE_ONLY.set()

        try:
            with Live(Padding(download_table, (1, 5)), console=console, refresh_per_second=5):
            # Use transient mode to prevent display remnants
            with Live(Padding(download_table, (1, 5)), console=console, refresh_per_second=5, transient=True):
                with ThreadPoolExecutor(downloads) as pool:
                    for download in futures.as_completed(
                        (

@@ -845,6 +912,31 @@ class dl:
        if font_count:
            self.log.info(f"Attached {font_count} fonts for the Subtitles")

        # Handle DRM decryption BEFORE repacking (must decrypt first!)
        service_name = service.__class__.__name__.upper()
        decryption_method = config.decryption_map.get(service_name, config.decryption)
        use_mp4decrypt = decryption_method.lower() == "mp4decrypt"

        if use_mp4decrypt:
            decrypt_tool = "mp4decrypt"
        else:
            decrypt_tool = "Shaka Packager"

        drm_tracks = [track for track in title.tracks if track.drm]
        if drm_tracks:
            with console.status(f"Decrypting tracks with {decrypt_tool}..."):
                has_decrypted = False
                for track in drm_tracks:
                    for drm in track.drm:
                        if hasattr(drm, "decrypt"):
                            drm.decrypt(track.path, use_mp4decrypt=use_mp4decrypt)
                            has_decrypted = True
                            events.emit(events.Types.TRACK_REPACKED, track=track)
                            break
                if has_decrypted:
                    self.log.info(f"Decrypted tracks with {decrypt_tool}")

        # Now repack the decrypted tracks
        with console.status("Repackaging tracks with FFMPEG..."):
            has_repacked = False
            for track in title.tracks:

@@ -869,6 +961,64 @@ class dl:
        )

        multiplex_tasks: list[tuple[TaskID, Tracks]] = []

        # Check if we're in hybrid mode
        if any(r == Video.Range.HYBRID for r in range_) and title.tracks.videos:
            # Hybrid mode: process DV and HDR10 tracks separately for each resolution
            self.log.info("Processing Hybrid HDR10+DV tracks...")

            # Group video tracks by resolution
            resolutions_processed = set()
            hdr10_tracks = [v for v in title.tracks.videos if v.range == Video.Range.HDR10]
            dv_tracks = [v for v in title.tracks.videos if v.range == Video.Range.DV]

            for hdr10_track in hdr10_tracks:
                resolution = hdr10_track.height
                if resolution in resolutions_processed:
                    continue
                resolutions_processed.add(resolution)

                # Find matching DV track for this resolution (use the lowest DV resolution)
                matching_dv = min(dv_tracks, key=lambda v: v.height) if dv_tracks else None

                if matching_dv:
                    # Create track pair for this resolution
                    resolution_tracks = [hdr10_track, matching_dv]

                    for track in resolution_tracks:
                        track.needs_duration_fix = True

                    # Run the hybrid processing for this resolution
                    Hybrid(resolution_tracks, self.service)

                    # Create unique output filename for this resolution
                    hybrid_filename = f"HDR10-DV-{resolution}p.hevc"
                    hybrid_output_path = config.directories.temp / hybrid_filename

                    # The Hybrid class creates HDR10-DV.hevc, rename it for this resolution
                    default_output = config.directories.temp / "HDR10-DV.hevc"
                    if default_output.exists():
                        shutil.move(str(default_output), str(hybrid_output_path))

                    # Create a mux task for this resolution
                    task_description = f"Multiplexing Hybrid HDR10+DV {resolution}p"
                    task_id = progress.add_task(f"{task_description}...", total=None, start=False)

                    # Create tracks with the hybrid video output for this resolution
                    task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments

                    # Create a new video track for the hybrid output
                    hybrid_track = deepcopy(hdr10_track)
                    hybrid_track.path = hybrid_output_path
                    hybrid_track.range = Video.Range.DV  # It's now a DV track
                    hybrid_track.needs_duration_fix = True
                    task_tracks.videos = [hybrid_track]

                    multiplex_tasks.append((task_id, task_tracks))

            console.print()
        else:
            # Normal mode: process each video track separately
            for video_track in title.tracks.videos or [None]:
                task_description = "Multiplexing"
                if video_track:

@@ -885,7 +1035,7 @@ class dl:
                multiplex_tasks.append((task_id, task_tracks))

        with Live(Padding(progress, (0, 5, 1, 5)), console=console):
        with Live(Padding(progress, (0, 5, 1, 5)), console=console, transient=True):
            for task_id, task_tracks in multiplex_tasks:
                progress.start_task(task_id)  # TODO: Needed?
                muxed_path, return_code, errors = task_tracks.mux(
@@ -10,11 +10,11 @@ from rich.padding import Padding
from rich.table import Table
from rich.tree import Tree

from unshackle.core import binaries
from unshackle.core.config import POSSIBLE_CONFIG_PATHS, config, config_path
from unshackle.core.console import console
from unshackle.core.constants import context_settings
from unshackle.core.services import Services
from unshackle.core.utils.osenvironment import get_os_arch


@click.group(short_help="Manage and configure the project environment.", context_settings=context_settings)

@@ -25,45 +25,134 @@ def env() -> None:
@env.command()
def check() -> None:
    """Checks environment for the required dependencies."""
    table = Table(title="Dependencies", expand=True)
    table.add_column("Name", no_wrap=True)
    table.add_column("Installed", justify="center")
    table.add_column("Path", no_wrap=False, overflow="fold")

    # builds shaka-packager based on os, arch
    packager_dep = get_os_arch("packager")

    # Helper function to find binary with multiple possible names
    def find_binary(*names):
        for name in names:
            if shutil.which(name):
                return name
        return names[0]  # Return first name as fallback for display

    dependencies = [
        {"name": "CCExtractor", "binary": "ccextractor"},
        {"name": "FFMpeg", "binary": "ffmpeg"},
        {"name": "MKVToolNix", "binary": "mkvmerge"},
        {"name": "Shaka-Packager", "binary": packager_dep},
        {"name": "N_m3u8DL-RE", "binary": find_binary("N_m3u8DL-RE", "n-m3u8dl-re")},
        {"name": "Aria2(c)", "binary": "aria2c"},
    # Define all dependencies
    all_deps = [
        # Core Media Tools
        {"name": "FFmpeg", "binary": binaries.FFMPEG, "required": True, "desc": "Media processing", "cat": "Core"},
        {"name": "FFprobe", "binary": binaries.FFProbe, "required": True, "desc": "Media analysis", "cat": "Core"},
        {"name": "MKVToolNix", "binary": binaries.MKVToolNix, "required": True, "desc": "MKV muxing", "cat": "Core"},
        {
            "name": "mkvpropedit",
            "binary": binaries.Mkvpropedit,
            "required": True,
            "desc": "MKV metadata",
            "cat": "Core",
        },
        {
            "name": "shaka-packager",
            "binary": binaries.ShakaPackager,
            "required": True,
            "desc": "DRM decryption",
            "cat": "DRM",
        },
        {
            "name": "mp4decrypt",
            "binary": binaries.Mp4decrypt,
            "required": False,
            "desc": "DRM decryption",
            "cat": "DRM",
        },
        # HDR Processing
        {"name": "dovi_tool", "binary": binaries.DoviTool, "required": False, "desc": "Dolby Vision", "cat": "HDR"},
        {
            "name": "HDR10Plus_tool",
            "binary": binaries.HDR10PlusTool,
            "required": False,
            "desc": "HDR10+ metadata",
            "cat": "HDR",
        },
        # Downloaders
        {"name": "aria2c", "binary": binaries.Aria2, "required": False, "desc": "Multi-thread DL", "cat": "Download"},
        {
            "name": "N_m3u8DL-RE",
            "binary": binaries.N_m3u8DL_RE,
            "required": False,
            "desc": "HLS/DASH/ISM",
            "cat": "Download",
        },
        # Subtitle Tools
        {
            "name": "SubtitleEdit",
            "binary": binaries.SubtitleEdit,
            "required": False,
            "desc": "Sub conversion",
            "cat": "Subtitle",
        },
        {
            "name": "CCExtractor",
            "binary": binaries.CCExtractor,
            "required": False,
            "desc": "CC extraction",
            "cat": "Subtitle",
        },
        # Media Players
        {"name": "FFplay", "binary": binaries.FFPlay, "required": False, "desc": "Simple player", "cat": "Player"},
        {"name": "MPV", "binary": binaries.MPV, "required": False, "desc": "Advanced player", "cat": "Player"},
        # Network Tools
        {
            "name": "HolaProxy",
            "binary": binaries.HolaProxy,
            "required": False,
            "desc": "Proxy service",
            "cat": "Network",
        },
        {"name": "Caddy", "binary": binaries.Caddy, "required": False, "desc": "Web server", "cat": "Network"},
    ]

    for dep in dependencies:
        path = shutil.which(dep["binary"])
    # Track overall status
    all_required_installed = True
    total_installed = 0
    total_required = 0
    missing_required = []

    # Create a single table
    table = Table(
        title="Environment Dependencies", title_style="bold", show_header=True, header_style="bold", expand=False
    )
    table.add_column("Category", style="bold cyan", width=10)
    table.add_column("Tool", width=16)
    table.add_column("Status", justify="center", width=10)
    table.add_column("Req", justify="center", width=4)
    table.add_column("Purpose", style="bright_black", width=20)

    last_cat = None
    for dep in all_deps:
        path = dep["binary"]

        # Category column (only show when it changes)
        category = dep["cat"] if dep["cat"] != last_cat else ""
        last_cat = dep["cat"]

        # Status
        if path:
            installed = "[green]:heavy_check_mark:[/green]"
            path_output = path.lower()
            status = "[green]✓[/green]"
            total_installed += 1
        else:
            installed = "[red]:x:[/red]"
            path_output = "Not Found"
            status = "[red]✗[/red]"
            if dep["required"]:
                all_required_installed = False
                missing_required.append(dep["name"])

        # Add to the table
        table.add_row(dep["name"], installed, path_output)
        if dep["required"]:
            total_required += 1

    # Display the result
    console.print(Padding(table, (1, 5)))
        # Required column (compact)
        req = "[red]Y[/red]" if dep["required"] else "[bright_black]-[/bright_black]"

        # Add row
        table.add_row(category, dep["name"], status, req, dep["desc"])

    console.print(Padding(table, (1, 2)))

    # Compact summary
    summary_parts = [f"[bold]Total:[/bold] {total_installed}/{len(all_deps)}"]

    if all_required_installed:
        summary_parts.append("[green]All required tools installed ✓[/green]")
    else:
        summary_parts.append(f"[red]Missing required: {', '.join(missing_required)}[/red]")

    console.print(Padding(" ".join(summary_parts), (1, 2)))


@env.command()
@@ -79,7 +168,7 @@ def info() -> None:
        tree.add(f"[repr.number]{i}.[/] [text2]{path.resolve()}[/]")
    console.print(Padding(tree, (0, 5)))

    table = Table(title="Directories", expand=True)
    table = Table(title="Directories", title_style="bold", expand=True)
    table.add_column("Name", no_wrap=True)
    table.add_column("Path", no_wrap=False, overflow="fold")

@@ -92,7 +181,16 @@ def info() -> None:
    for name in sorted(dir(config.directories)):
        if name.startswith("__") or name == "app_dirs":
            continue
        path = getattr(config.directories, name).resolve()
        attr_value = getattr(config.directories, name)

        # Handle both single Path objects and lists of Path objects
        if isinstance(attr_value, list):
            # For lists, show each path on a separate line
            paths_str = "\n".join(str(path.resolve()) for path in attr_value)
            table.add_row(name.title(), paths_str)
        else:
            # For single Path objects, use the original logic
            path = attr_value.resolve()
            for var, var_path in path_vars.items():
                if path.is_relative_to(var_path):
                    path = rf"%{var}%\{path.relative_to(var_path)}"
@@ -46,7 +46,8 @@ def copy(to_vault: str, from_vaults: list[str], service: Optional[str] = None) -...
        vault_type = vault["type"]
        vault_args = vault.copy()
        del vault_args["type"]
        vaults.load(vault_type, **vault_args)
        if not vaults.load(vault_type, **vault_args):
            raise click.ClickException(f"Failed to load vault ({vault_name}).")

    to_vault: Vault = vaults.vaults[0]
    from_vaults: list[Vault] = vaults.vaults[1:]
@@ -16,7 +16,7 @@ from unshackle.core import binaries
from unshackle.core.config import config
from unshackle.core.console import console
from unshackle.core.constants import context_settings
from unshackle.core.proxies import Basic, Hola, NordVPN
from unshackle.core.proxies import Basic, Hola, NordVPN, SurfsharkVPN
from unshackle.core.service import Service
from unshackle.core.services import Services
from unshackle.core.utils.click_types import ContextData

@@ -69,6 +69,8 @@ def search(ctx: click.Context, no_proxy: bool, profile: Optional[str] = None, pr...
        proxy_providers.append(Basic(**config.proxy_providers["basic"]))
    if config.proxy_providers.get("nordvpn"):
        proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"]))
    if config.proxy_providers.get("surfsharkvpn"):
        proxy_providers.append(SurfsharkVPN(**config.proxy_providers["surfsharkvpn"]))
    if binaries.HolaProxy:
        proxy_providers.append(Hola())
    for proxy_provider in proxy_providers:
@@ -1 +1 @@
__version__ = "1.0.1"
__version__ = "1.3.0"
@@ -1,6 +1,5 @@
import atexit
import logging
from datetime import datetime
from pathlib import Path

import click

@@ -16,6 +15,7 @@ from unshackle.core.commands import Commands
from unshackle.core.config import config
from unshackle.core.console import ComfyRichHandler, console
from unshackle.core.constants import context_settings
from unshackle.core.update_checker import UpdateChecker
from unshackle.core.utilities import rotate_log_file

LOGGING_PATH = None

@@ -69,7 +69,7 @@ def main(version: bool, debug: bool, log_path: Path) -> None:
            r" ▀▀▀ ▀▀ █▪ ▀▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀ ·▀ ▀.▀▀▀ ▀▀▀ ",
            style="ascii.art",
        ),
        f"v[repr.number]{__version__}[/]",
        "v 3.3.3 Copyright © 2019-2025 rlaphoenix" + f"\nv [repr.number]{__version__}[/] - unshackle",
    ),
    (1, 11, 1, 10),
    expand=True,

@@ -80,6 +80,22 @@ def main(version: bool, debug: bool, log_path: Path) -> None:
    if version:
        return

    if config.update_checks:
        try:
            latest_version = UpdateChecker.check_for_updates_sync(__version__)
            if latest_version:
                console.print(
                    f"\n[yellow]⚠️ Update available![/yellow] "
                    f"Current: {__version__} → Latest: [green]{latest_version}[/green]",
                    justify="center",
                )
                console.print(
                    "Visit: https://github.com/unshackle-dl/unshackle/releases/latest\n",
                    justify="center",
                )
        except Exception:
            pass


@atexit.register
def save_log():
@@ -8,7 +8,24 @@ __shaka_platform = {"win32": "win", "darwin": "osx"}.get(sys.platform, sys.platf...

def find(*names: str) -> Optional[Path]:
    """Find the path of the first found binary name."""
    # Get the directory containing this file to find the local binaries folder
    current_dir = Path(__file__).parent.parent
    local_binaries_dir = current_dir / "binaries"

    for name in names:
        # First check local binaries folder
        if local_binaries_dir.exists():
            local_path = local_binaries_dir / name
            if local_path.is_file() and local_path.stat().st_mode & 0o111:  # Check if executable
                return local_path

            # Also check with .exe extension on Windows
            if sys.platform == "win32":
                local_path_exe = local_binaries_dir / f"{name}.exe"
                if local_path_exe.is_file():
                    return local_path_exe

        # Fall back to system PATH
        path = shutil.which(name)
        if path:
            return Path(path)

@@ -32,6 +49,11 @@ HolaProxy = find("hola-proxy")
MPV = find("mpv")
Caddy = find("caddy")
N_m3u8DL_RE = find("N_m3u8DL-RE", "n-m3u8dl-re")
MKVToolNix = find("mkvmerge")
Mkvpropedit = find("mkvpropedit")
DoviTool = find("dovi_tool")
HDR10PlusTool = find("hdr10plus_tool", "HDR10Plus_tool")
Mp4decrypt = find("mp4decrypt")


__all__ = (

@@ -46,5 +68,10 @@ __all__ = (
    "MPV",
    "Caddy",
    "N_m3u8DL_RE",
    "MKVToolNix",
    "Mkvpropedit",
    "DoviTool",
    "HDR10PlusTool",
    "Mp4decrypt",
    "find",
)
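A short usage sketch of the lookup order `find` implements (the local `unshackle/binaries/` folder first, then the system PATH); the calling code below is illustrative:

```python
from unshackle.core import binaries

# Module-level names like binaries.Mp4decrypt are resolved once at import
# time; find() can also be called directly for ad-hoc lookups.
tool = binaries.find("mp4decrypt")
if tool is None:
    raise EnvironmentError("mp4decrypt executable not found but is required.")
print(f"using {tool}")
```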
@@ -14,7 +14,7 @@ class Config:
    core_dir = Path(__file__).resolve().parent
    namespace_dir = core_dir.parent
    commands = namespace_dir / "commands"
    services = namespace_dir / "services"
    services = [namespace_dir / "services"]
    vaults = namespace_dir / "vaults"
    fonts = namespace_dir / "fonts"
    user_configs = core_dir.parent

@@ -45,12 +45,16 @@ class Config:
        self.curl_impersonate: dict = kwargs.get("curl_impersonate") or {}
        self.remote_cdm: list[dict] = kwargs.get("remote_cdm") or []
        self.credentials: dict = kwargs.get("credentials") or {}
        self.subtitle: dict = kwargs.get("subtitle") or {}

        self.directories = self._Directories()
        for name, path in (kwargs.get("directories") or {}).items():
            if name.lower() in ("app_dirs", "core_dir", "namespace_dir", "user_configs", "data"):
                # these must not be modified by the user
                continue
            if name == "services" and isinstance(path, list):
                setattr(self.directories, name, [Path(p).expanduser() for p in path])
            else:
                setattr(self.directories, name, Path(path).expanduser())

        downloader_cfg = kwargs.get("downloader") or "requests"

@@ -68,13 +72,23 @@ class Config:
        self.headers: dict = kwargs.get("headers") or {}
        self.key_vaults: list[dict[str, Any]] = kwargs.get("key_vaults", [])
        self.muxing: dict = kwargs.get("muxing") or {}
        self.nordvpn: dict = kwargs.get("nordvpn") or {}
        self.proxy_providers: dict = kwargs.get("proxy_providers") or {}
        self.serve: dict = kwargs.get("serve") or {}
        self.services: dict = kwargs.get("services") or {}
        decryption_cfg = kwargs.get("decryption") or {}
        if isinstance(decryption_cfg, dict):
            self.decryption_map = {k.upper(): v for k, v in decryption_cfg.items()}
            self.decryption = self.decryption_map.get("DEFAULT", "shaka")
        else:
            self.decryption_map = {}
            self.decryption = decryption_cfg or "shaka"

        self.set_terminal_bg: bool = kwargs.get("set_terminal_bg", False)
        self.tag: str = kwargs.get("tag") or ""
        self.tmdb_api_key: str = kwargs.get("tmdb_api_key") or ""
        self.update_checks: bool = kwargs.get("update_checks", True)
        self.update_check_interval: int = kwargs.get("update_check_interval", 24)
        self.scene_naming: bool = kwargs.get("scene_naming", True)

    @classmethod
    def from_yaml(cls, path: Path) -> Config:
@@ -1,4 +1,7 @@
import atexit
import logging
import signal
import sys
from datetime import datetime
from types import ModuleType
from typing import IO, Callable, Iterable, List, Literal, Mapping, Optional, Union

@@ -167,6 +170,8 @@ class ComfyConsole(Console):
    time.monotonic.
    """

    _cleanup_registered = False

    def __init__(
        self,
        *,

@@ -233,6 +238,9 @@ class ComfyConsole(Console):
        if log_renderer:
            self._log_render = log_renderer

        # Register terminal cleanup handlers
        self._register_cleanup()

    def status(
        self,
        status: RenderableType,

@@ -283,6 +291,38 @@ class ComfyConsole(Console):

        return status_renderable

    def _register_cleanup(self):
        """Register terminal cleanup handlers."""
        if not ComfyConsole._cleanup_registered:
            ComfyConsole._cleanup_registered = True

            # Register cleanup on normal exit
            atexit.register(self._cleanup_terminal)

            # Register cleanup on signals
            signal.signal(signal.SIGINT, self._signal_handler)
            signal.signal(signal.SIGTERM, self._signal_handler)

    def _cleanup_terminal(self):
        """Restore terminal to a clean state."""
        try:
            # Show cursor using ANSI escape codes
            sys.stdout.write("\x1b[?25h")  # Show cursor
            sys.stdout.write("\x1b[0m")  # Reset attributes
            sys.stdout.flush()

            # Also use Rich's method
            self.show_cursor(True)
        except Exception:
            # Silently fail if cleanup fails
            pass

    def _signal_handler(self, signum, frame):
        """Handle signals with cleanup."""
        self._cleanup_terminal()
        # Exit after cleanup
        sys.exit(1)


catppuccin_mocha = {
    # Colors based on "CatppuccinMocha" from Gogh themes
@@ -7,7 +7,7 @@ DOWNLOAD_LICENCE_ONLY = Event()
DRM_SORT_MAP = ["ClearKey", "Widevine"]
LANGUAGE_MAX_DISTANCE = 5  # this is max to be considered "same", e.g., en, en-US, en-AU
VIDEO_CODEC_MAP = {"AVC": "H.264", "HEVC": "H.265"}
DYNAMIC_RANGE_MAP = {"HDR10": "HDR", "HDR10+": "HDR", "Dolby Vision": "DV"}
DYNAMIC_RANGE_MAP = {"HDR10": "HDR", "HDR10+": "HDR10P", "Dolby Vision": "DV", "HDR10 / HDR10+": "HDR10P", "HDR10 / HDR10": "HDR"}
AUDIO_CODEC_MAP = {"E-AC-3": "DDP", "AC-3": "DD"}

context_settings = dict(
@@ -76,6 +76,11 @@ def download(url: str, save_path: Path, session: Session, **kwargs: Any) -> Gene...

    try:
        content_length = int(stream.headers.get("Content-Length", "0"))

        # Skip Content-Length validation for compressed responses since
        # curl_impersonate automatically decompresses but Content-Length shows compressed size
        if stream.headers.get("Content-Encoding", "").lower() in ["gzip", "deflate", "br"]:
            content_length = 0
    except ValueError:
        content_length = 0

@@ -90,6 +90,11 @@ def download(
    if not segmented:
        try:
            content_length = int(stream.headers.get("Content-Length", "0"))

            # Skip Content-Length validation for compressed responses since
            # requests automatically decompresses but Content-Length shows compressed size
            if stream.headers.get("Content-Encoding", "").lower() in ["gzip", "deflate", "br"]:
                content_length = 0
        except ValueError:
            content_length = 0
||||
@@ -187,14 +187,69 @@ class PlayReady:
        if not self.content_keys:
            raise PlayReady.Exceptions.EmptyLicense("No Content Keys were within the License")

    def decrypt(self, path: Path) -> None:
    def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
        """
        Decrypt a Track with PlayReady DRM.

        Args:
            path: Path to the encrypted file to decrypt
            use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager

        Raises:
            EnvironmentError if the required decryption executable could not be found.
            ValueError if the track has not yet been downloaded.
            SubprocessError if the decryption process returned a non-zero exit code.
        """
        if not self.content_keys:
            raise ValueError("Cannot decrypt a Track without any Content Keys...")
        if not binaries.ShakaPackager:
            raise EnvironmentError("Shaka Packager executable not found but is required.")

        if not path or not path.exists():
            raise ValueError("Tried to decrypt a file that does not exist.")

        if use_mp4decrypt:
            return self._decrypt_with_mp4decrypt(path)
        else:
            return self._decrypt_with_shaka_packager(path)

    def _decrypt_with_mp4decrypt(self, path: Path) -> None:
        """Decrypt using mp4decrypt"""
        if not binaries.Mp4decrypt:
            raise EnvironmentError("mp4decrypt executable not found but is required.")

        output_path = path.with_stem(f"{path.stem}_decrypted")

        # Build key arguments
        key_args = []
        for kid, key in self.content_keys.items():
            kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
            key_hex = key if isinstance(key, str) else key.hex()
            key_args.extend(["--key", f"{kid_hex}:{key_hex}"])

        cmd = [
            str(binaries.Mp4decrypt),
            "--show-progress",
            *key_args,
            str(path),
            str(output_path),
        ]

        try:
            subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
            raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)

        if not output_path.exists():
            raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
        if output_path.stat().st_size == 0:
            raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")

        path.unlink()
        shutil.move(output_path, path)

    def _decrypt_with_shaka_packager(self, path: Path) -> None:
        """Decrypt using Shaka Packager (original method)"""
        if not binaries.ShakaPackager:
            raise EnvironmentError("Shaka Packager executable not found but is required.")

        output_path = path.with_stem(f"{path.stem}_decrypted")
        config.directories.temp.mkdir(parents=True, exist_ok=True)
@@ -227,22 +227,69 @@ class Widevine:
        finally:
            cdm.close(session_id)

    def decrypt(self, path: Path) -> None:
    def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
        """
        Decrypt a Track with Widevine DRM.

        Args:
            path: Path to the encrypted file to decrypt
            use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager

        Raises:
            EnvironmentError if the Shaka Packager executable could not be found.
            EnvironmentError if the required decryption executable could not be found.
            ValueError if the track has not yet been downloaded.
            SubprocessError if Shaka Packager returned a non-zero exit code.
            SubprocessError if the decryption process returned a non-zero exit code.
        """
        if not self.content_keys:
            raise ValueError("Cannot decrypt a Track without any Content Keys...")

        if not binaries.ShakaPackager:
            raise EnvironmentError("Shaka Packager executable not found but is required.")
        if not path or not path.exists():
            raise ValueError("Tried to decrypt a file that does not exist.")

        if use_mp4decrypt:
            return self._decrypt_with_mp4decrypt(path)
        else:
            return self._decrypt_with_shaka_packager(path)

    def _decrypt_with_mp4decrypt(self, path: Path) -> None:
        """Decrypt using mp4decrypt"""
        if not binaries.Mp4decrypt:
            raise EnvironmentError("mp4decrypt executable not found but is required.")

        output_path = path.with_stem(f"{path.stem}_decrypted")

        # Build key arguments
        key_args = []
        for kid, key in self.content_keys.items():
            kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
            key_hex = key if isinstance(key, str) else key.hex()
            key_args.extend(["--key", f"{kid_hex}:{key_hex}"])

        cmd = [
            str(binaries.Mp4decrypt),
            "--show-progress",
            *key_args,
            str(path),
            str(output_path),
        ]

        try:
            subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        except subprocess.CalledProcessError as e:
            error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
            raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)

        if not output_path.exists():
            raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
        if output_path.stat().st_size == 0:
            raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")

        path.unlink()
        shutil.move(output_path, path)

    def _decrypt_with_shaka_packager(self, path: Path) -> None:
        """Decrypt using Shaka Packager (original method)"""
        if not binaries.ShakaPackager:
            raise EnvironmentError("Shaka Packager executable not found but is required.")

        output_path = path.with_stem(f"{path.stem}_decrypted")
        config.directories.temp.mkdir(parents=True, exist_ok=True)
@@ -1,5 +1,6 @@
from .basic import Basic
from .hola import Hola
from .nordvpn import NordVPN
from .surfsharkvpn import SurfsharkVPN

__all__ = ("Basic", "Hola", "NordVPN")
__all__ = ("Basic", "Hola", "NordVPN", "SurfsharkVPN")
124
unshackle/core/proxies/surfsharkvpn.py
Normal file
124
unshackle/core/proxies/surfsharkvpn.py
Normal file
@@ -0,0 +1,124 @@
import json
import random
import re
from typing import Optional

import requests

from unshackle.core.proxies.proxy import Proxy


class SurfsharkVPN(Proxy):
    def __init__(self, username: str, password: str, server_map: Optional[dict[str, int]] = None):
        """
        Proxy Service using SurfsharkVPN Service Credentials.

        A username and password must be provided. These are Service Credentials, not your Login Credentials.
        The Service Credentials can be found here: https://my.surfshark.com/vpn/manual-setup/main/openvpn
        """
        if not username:
            raise ValueError("No Username was provided to the SurfsharkVPN Proxy Service.")
        if not password:
            raise ValueError("No Password was provided to the SurfsharkVPN Proxy Service.")
        if not re.match(r"^[a-z0-9]{48}$", username + password, re.IGNORECASE) or "@" in username:
            raise ValueError(
                "The Username and Password must be SurfsharkVPN Service Credentials, not your Login Credentials. "
                "The Service Credentials can be found here: https://my.surfshark.com/vpn/manual-setup/main/openvpn"
            )

        if server_map is not None and not isinstance(server_map, dict):
            raise TypeError(f"Expected server_map to be a dict mapping a region to a server ID, not '{server_map!r}'.")

        self.username = username
        self.password = password
        self.server_map = server_map or {}

        self.countries = self.get_countries()

    def __repr__(self) -> str:
        countries = len(set(x.get("country") for x in self.countries if x.get("country")))
        servers = sum(1 for x in self.countries if x.get("connectionName"))

        return f"{countries} Countr{['ies', 'y'][countries == 1]} ({servers} Server{['s', ''][servers == 1]})"

    def get_proxy(self, query: str) -> Optional[str]:
        """
        Get an HTTP(SSL) proxy URI for a SurfsharkVPN server.
        """
        query = query.lower()
        if re.match(r"^[a-z]{2}\d+$", query):
            # country and surfsharkvpn server id, e.g., au-per, be-anr, us-bos
            hostname = f"{query}.prod.surfshark.com"
        else:
            if query.isdigit():
                # country id
                country = self.get_country(by_id=int(query))
            elif re.match(r"^[a-z]+$", query):
                # country code
                country = self.get_country(by_code=query)
            else:
                raise ValueError(f"The query provided is unsupported and unrecognized: {query}")
            if not country:
                # SurfsharkVPN doesn't have servers in this region
                return

            server_mapping = self.server_map.get(country["countryCode"].lower())
            if server_mapping:
                # country was set to a specific server ID in config
                hostname = f"{country['code'].lower()}{server_mapping}.prod.surfshark.com"
            else:
                # get a random server ID
                random_server = self.get_random_server(country["countryCode"])
                if not random_server:
                    raise ValueError(
                        f"The SurfsharkVPN Country {query} currently has no random servers. "
                        "Try again later. If the issue persists, double-check the query."
                    )
                hostname = random_server

        return f"https://{self.username}:{self.password}@{hostname}:443"

    def get_country(self, by_id: Optional[int] = None, by_code: Optional[str] = None) -> Optional[dict]:
        """Search for a Country and its metadata."""
        if all(x is None for x in (by_id, by_code)):
            raise ValueError("At least one search query must be made.")

        for country in self.countries:
            if all(
                [
                    by_id is None or country["id"] == int(by_id),
                    by_code is None or country["countryCode"] == by_code.upper(),
                ]
            ):
                return country

    def get_random_server(self, country_id: str):
        """
        Get a random Server for a Country.

        Note: There may not always be more than one recommended server.
        """
        country = [x["connectionName"] for x in self.countries if x["countryCode"].lower() == country_id.lower()]
        try:
            country = random.choice(country)
            return country
        except Exception:
            raise ValueError("Could not get random countrycode from the countries list.")

    @staticmethod
    def get_countries() -> list[dict]:
        """Get a list of available Countries and their metadata."""
        res = requests.get(
            url="https://api.surfshark.com/v3/server/clusters/all",
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
                "Content-Type": "application/json",
            },
        )
        if not res.ok:
            raise ValueError(f"Failed to get a list of SurfsharkVPN countries [{res.status_code}]")

        try:
            return res.json()
        except json.JSONDecodeError:
            raise ValueError("Could not decode list of SurfsharkVPN countries, not JSON data.")
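A minimal usage sketch of the new provider; the 24-character service credentials and the region pin are placeholders, and the constructor fetches the live server list, so this performs a network request:

    from unshackle.core.proxies.surfsharkvpn import SurfsharkVPN

    proxy = SurfsharkVPN(
        username="abcdefghijklmnopqrstuvwx",  # placeholder service username
        password="123456789012345678901234",  # placeholder service password
        server_map={"us": 3844},              # optionally pin a region to a server ID
    )
    print(proxy.get_proxy("us"))  # e.g. https://user:pass@us3844.prod.surfshark.com:443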
@@ -6,7 +6,14 @@ from unshackle.core.config import config
 from unshackle.core.service import Service
 from unshackle.core.utilities import import_module_by_path

-_SERVICES = sorted((path for path in config.directories.services.glob("*/__init__.py")), key=lambda x: x.parent.stem)
+_service_dirs = config.directories.services
+if not isinstance(_service_dirs, list):
+    _service_dirs = [_service_dirs]
+
+_SERVICES = sorted(
+    (path for service_dir in _service_dirs for path in service_dir.glob("*/__init__.py")),
+    key=lambda x: x.parent.stem,
+)

 _MODULES = {path.parent.stem: getattr(import_module_by_path(path), path.parent.stem) for path in _SERVICES}
@@ -107,14 +107,13 @@ class Episode(Title):
            name=self.name or "",
        ).strip()

        # MULTi
        if unique_audio_languages > 1:
            name += " MULTi"

        if config.scene_naming:
            # Resolution
            if primary_video_track:
                resolution = primary_video_track.height
-               aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
+               aspect_ratio = [
+                   int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
+               ]
                if len(aspect_ratio) == 1:
                    # e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
                    aspect_ratio.append(1)
@@ -135,12 +134,22 @@ class Episode(Title):
            # 'WEB-DL'
            name += " WEB-DL"

            # DUAL
            if unique_audio_languages == 2:
                name += " DUAL"

            # MULTi
            if unique_audio_languages > 2:
                name += " MULTi"

            # Audio Codec + Channels (+ feature)
            if primary_audio_track:
                codec = primary_audio_track.format
                channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
                if channel_layout:
-                   channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
+                   channels = float(
+                       sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
+                   )
                else:
                    channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
                    channels = float(channel_count)
@@ -154,9 +163,16 @@ class Episode(Title):
            if primary_video_track:
                codec = primary_video_track.format
                hdr_format = primary_video_track.hdr_format_commercial
-               trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
+               trc = (
+                   primary_video_track.transfer_characteristics
+                   or primary_video_track.transfer_characteristics_original
+               )
                frame_rate = float(primary_video_track.frame_rate)
                if hdr_format:
                    if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
                        if (primary_video_track.hdr_format_commercial) != "Dolby Vision":
                            name += f" DV {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                        else:
                            name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                elif trc and "HLG" in trc:
                    name += " HLG"
@@ -168,6 +184,9 @@ class Episode(Title):
            name += f"-{config.tag}"

            return sanitize_filename(name)
        else:
            # Simple naming style without technical details - use spaces instead of dots
            return sanitize_filename(name, " ")


class Series(SortedKeyList, ABC):
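To make the channel arithmetic above concrete: every named speaker position counts as 1 and "LFE" counts as 0.1, so a standard 5.1 layout sums like this (a self-contained check):

    channel_layout = "L R C LFE Ls Rs"
    channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
    print(channels)  # 5.1, later joined with the audio codec in names like "AAC2.0"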
@@ -58,14 +58,13 @@ class Movie(Title):
        # Name (Year)
        name = str(self).replace("$", "S")  # e.g., Arli$$

        # MULTi
        if unique_audio_languages > 1:
            name += " MULTi"

        if config.scene_naming:
            # Resolution
            if primary_video_track:
                resolution = primary_video_track.height
-               aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
+               aspect_ratio = [
+                   int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
+               ]
                if len(aspect_ratio) == 1:
                    # e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
                    aspect_ratio.append(1)
@@ -86,12 +85,22 @@ class Movie(Title):
            # 'WEB-DL'
            name += " WEB-DL"

            # DUAL
            if unique_audio_languages == 2:
                name += " DUAL"

            # MULTi
            if unique_audio_languages > 2:
                name += " MULTi"

            # Audio Codec + Channels (+ feature)
            if primary_audio_track:
                codec = primary_audio_track.format
                channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
                if channel_layout:
-                   channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
+                   channels = float(
+                       sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
+                   )
                else:
                    channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
                    channels = float(channel_count)
@@ -105,9 +114,16 @@ class Movie(Title):
            if primary_video_track:
                codec = primary_video_track.format
                hdr_format = primary_video_track.hdr_format_commercial
-               trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
+               trc = (
+                   primary_video_track.transfer_characteristics
+                   or primary_video_track.transfer_characteristics_original
+               )
                frame_rate = float(primary_video_track.frame_rate)
                if hdr_format:
                    if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
                        if (primary_video_track.hdr_format_commercial) != "Dolby Vision":
                            name += f" DV {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                        else:
                            name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
                elif trc and "HLG" in trc:
                    name += " HLG"
@@ -119,6 +135,9 @@ class Movie(Title):
            name += f"-{config.tag}"

            return sanitize_filename(name)
        else:
            # Simple naming style without technical details - use spaces instead of dots
            return sanitize_filename(name, " ")


class Movies(SortedKeyList, ABC):
@@ -100,6 +100,7 @@ class Song(Title):
        # NN. Song Name
        name = str(self).split(" / ")[1]

        if config.scene_naming:
            # Service
            if show_service:
                name += f" {self.service.__name__}"
@@ -116,6 +117,9 @@ class Song(Title):
            name += f"-{config.tag}"

            return sanitize_filename(name, " ")
        else:
            # Simple naming style without technical details
            return sanitize_filename(name, " ")


class Album(SortedKeyList, ABC):
@@ -2,9 +2,10 @@ from .attachment import Attachment
 from .audio import Audio
 from .chapter import Chapter
 from .chapters import Chapters
+from .hybrid import Hybrid
 from .subtitle import Subtitle
 from .track import Track
 from .tracks import Tracks
 from .video import Video

-__all__ = ("Audio", "Attachment", "Chapter", "Chapters", "Subtitle", "Track", "Tracks", "Video")
+__all__ = ("Audio", "Attachment", "Chapter", "Chapters", "Hybrid", "Subtitle", "Track", "Tracks", "Video")

@@ -62,6 +62,7 @@ class Attachment:
        session = session or requests.Session()
        response = session.get(url, stream=True)
        response.raise_for_status()
        config.directories.temp.mkdir(parents=True, exist_ok=True)
+       download_path.parent.mkdir(parents=True, exist_ok=True)

        with open(download_path, "wb") as f:
unshackle/core/tracks/hybrid.py (new file, 319 lines)
@@ -0,0 +1,319 @@
import json
import logging
import os
import subprocess
import sys
from pathlib import Path

from rich.padding import Padding
from rich.rule import Rule

from unshackle.core.binaries import DoviTool, HDR10PlusTool
from unshackle.core.config import config
from unshackle.core.console import console


class Hybrid:
    def __init__(self, videos, source) -> None:
        self.log = logging.getLogger("hybrid")

        """
        Takes the Dolby Vision and HDR10(+) streams out of the VideoTracks.
        It will then attempt to inject the Dolby Vision metadata layer into the HDR10(+) stream.
        If no DV track is available but HDR10+ is present, it will convert HDR10+ to DV.
        """
        global directories
        from unshackle.core.tracks import Video

        self.videos = videos
        self.source = source
        self.rpu_file = "RPU.bin"
        self.hdr_type = "HDR10"
        self.hevc_file = f"{self.hdr_type}-DV.hevc"
        self.hdr10plus_to_dv = False
        self.hdr10plus_file = "HDR10Plus.json"

        # Get resolution info from the HDR10 track for display
        hdr10_track = next((v for v in videos if v.range == Video.Range.HDR10), None)
        hdr10p_track = next((v for v in videos if v.range == Video.Range.HDR10P), None)
        track_for_res = hdr10_track or hdr10p_track
        self.resolution = f"{track_for_res.height}p" if track_for_res and track_for_res.height else "Unknown"

        console.print(Padding(Rule(f"[rule.text]HDR10+DV Hybrid ({self.resolution})"), (1, 2)))

        for video in self.videos:
            if not video.path or not os.path.exists(video.path):
                raise ValueError(f"Video track {video.id} was not downloaded before injection.")

        # Check if we have a DV track available
        has_dv = any(video.range == Video.Range.DV for video in self.videos)
        has_hdr10 = any(video.range == Video.Range.HDR10 for video in self.videos)
        has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)

        if not has_hdr10:
            raise ValueError("No HDR10 track available for hybrid processing.")

        # If we have HDR10+ but no DV, we can convert HDR10+ to DV
        if not has_dv and has_hdr10p:
            self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
            self.hdr10plus_to_dv = True
        elif not has_dv:
            raise ValueError("No DV track available and no HDR10+ to convert.")

        if os.path.isfile(config.directories.temp / self.hevc_file):
            self.log.info("✓ Already Injected")
            return

        for video in videos:
            # Use the actual path from the video track
            save_path = video.path
            if not save_path or not os.path.exists(save_path):
                raise ValueError(f"Video track {video.id} was not downloaded or path not found: {save_path}")

            if video.range == Video.Range.HDR10:
                self.extract_stream(save_path, "HDR10")
            elif video.range == Video.Range.HDR10P:
                self.extract_stream(save_path, "HDR10")
                self.hdr_type = "HDR10+"
            elif video.range == Video.Range.DV:
                self.extract_stream(save_path, "DV")

        if self.hdr10plus_to_dv:
            # Extract HDR10+ metadata and convert to DV
            hdr10p_video = next(v for v in videos if v.range == Video.Range.HDR10P)
            self.extract_hdr10plus(hdr10p_video)
            self.convert_hdr10plus_to_dv()
        else:
            # Regular DV extraction
            dv_video = next(v for v in videos if v.range == Video.Range.DV)
            self.extract_rpu(dv_video)
            if os.path.isfile(config.directories.temp / "RPU_UNT.bin"):
                self.rpu_file = "RPU_UNT.bin"
                self.level_6()
                # Mode 3 conversion already done during extraction when not untouched
            elif os.path.isfile(config.directories.temp / "RPU.bin"):
                # RPU already extracted with mode 3
                pass

        self.injecting()

        self.log.info("✓ Injection Completed")
        if self.source in ("itunes", "appletvplus"):
            Path.unlink(config.directories.temp / "hdr10.mkv")
            Path.unlink(config.directories.temp / "dv.mkv")
        Path.unlink(config.directories.temp / "HDR10.hevc", missing_ok=True)
        Path.unlink(config.directories.temp / "DV.hevc", missing_ok=True)
        Path.unlink(config.directories.temp / f"{self.rpu_file}", missing_ok=True)

    def ffmpeg_simple(self, save_path, output):
        """Simple ffmpeg execution without progress tracking"""
        p = subprocess.run(
            [
                "ffmpeg",
                "-nostdin",
                "-i",
                str(save_path),
                "-c:v",
                "copy",
                str(output),
                "-y",  # overwrite output
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return p.returncode

    def extract_stream(self, save_path, type_):
        output = Path(config.directories.temp / f"{type_}.hevc")

        self.log.info(f"+ Extracting {type_} stream")

        returncode = self.ffmpeg_simple(save_path, output)

        if returncode:
            output.unlink(missing_ok=True)
            self.log.error(f"x Failed extracting {type_} stream")
            sys.exit(1)

    def extract_rpu(self, video, untouched=False):
        if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
            config.directories.temp / "RPU_UNT.bin"
        ):
            return

        self.log.info(f"+ Extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")

        extraction_args = [str(DoviTool)]
        if not untouched:
            extraction_args += ["-m", "3"]
        extraction_args += [
            "extract-rpu",
            config.directories.temp / "DV.hevc",
            "-o",
            config.directories.temp / f"{'RPU' if not untouched else 'RPU_UNT'}.bin",
        ]

        rpu_extraction = subprocess.run(
            extraction_args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if rpu_extraction.returncode:
            Path.unlink(config.directories.temp / f"{'RPU' if not untouched else 'RPU_UNT'}.bin")
            if b"MAX_PQ_LUMINANCE" in rpu_extraction.stderr:
                self.extract_rpu(video, untouched=True)
            elif b"Invalid PPS index" in rpu_extraction.stderr:
                raise ValueError("Dolby Vision VideoTrack seems to be corrupt")
            else:
                raise ValueError(f"Failed extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")

    def level_6(self):
        """Edit RPU Level 6 values"""
        with open(config.directories.temp / "L6.json", "w+") as level6_file:
            level6 = {
                "cm_version": "V29",
                "length": 0,
                "level6": {
                    "max_display_mastering_luminance": 1000,
                    "min_display_mastering_luminance": 1,
                    "max_content_light_level": 0,
                    "max_frame_average_light_level": 0,
                },
            }

            json.dump(level6, level6_file, indent=3)

        if not os.path.isfile(config.directories.temp / "RPU_L6.bin"):
            self.log.info("+ Editing RPU Level 6 values")
            level6 = subprocess.run(
                [
                    str(DoviTool),
                    "editor",
                    "-i",
                    config.directories.temp / self.rpu_file,
                    "-j",
                    config.directories.temp / "L6.json",
                    "-o",
                    config.directories.temp / "RPU_L6.bin",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

            if level6.returncode:
                Path.unlink(config.directories.temp / "RPU_L6.bin")
                raise ValueError("Failed editing RPU Level 6 values")

            # Update rpu_file to use the edited version
            self.rpu_file = "RPU_L6.bin"

    def injecting(self):
        if os.path.isfile(config.directories.temp / self.hevc_file):
            return

        self.log.info(f"+ Injecting Dolby Vision metadata into {self.hdr_type} stream")

        inject_cmd = [
            str(DoviTool),
            "inject-rpu",
            "-i",
            config.directories.temp / "HDR10.hevc",
            "--rpu-in",
            config.directories.temp / self.rpu_file,
        ]

        # If we converted from HDR10+, optionally remove HDR10+ metadata during injection
        # Default to removing HDR10+ metadata since we're converting to DV
        if self.hdr10plus_to_dv:
            inject_cmd.append("--drop-hdr10plus")
            self.log.info("  - Removing HDR10+ metadata during injection")

        inject_cmd.extend(["-o", config.directories.temp / self.hevc_file])

        inject = subprocess.run(
            inject_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if inject.returncode:
            Path.unlink(config.directories.temp / self.hevc_file)
            raise ValueError("Failed injecting Dolby Vision metadata into HDR10 stream")

    def extract_hdr10plus(self, _video):
        """Extract HDR10+ metadata from the video stream"""
        if os.path.isfile(config.directories.temp / self.hdr10plus_file):
            return

        if not HDR10PlusTool:
            raise ValueError("HDR10Plus_tool not found. Please install it to use HDR10+ to DV conversion.")

        self.log.info("+ Extracting HDR10+ metadata")

        # HDR10Plus_tool needs a raw HEVC stream
        extraction = subprocess.run(
            [
                str(HDR10PlusTool),
                "extract",
                str(config.directories.temp / "HDR10.hevc"),
                "-o",
                str(config.directories.temp / self.hdr10plus_file),
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if extraction.returncode:
            raise ValueError("Failed extracting HDR10+ metadata")

        # Check if the extracted file has content
        if os.path.getsize(config.directories.temp / self.hdr10plus_file) == 0:
            raise ValueError("No HDR10+ metadata found in the stream")

    def convert_hdr10plus_to_dv(self):
        """Convert HDR10+ metadata to a Dolby Vision RPU"""
        if os.path.isfile(config.directories.temp / "RPU.bin"):
            return

        self.log.info("+ Converting HDR10+ metadata to Dolby Vision")

        # First create the extra metadata JSON for dovi_tool
        extra_metadata = {
            "cm_version": "V29",
            "length": 0,  # dovi_tool will figure this out
            "level6": {
                "max_display_mastering_luminance": 1000,
                "min_display_mastering_luminance": 1,
                "max_content_light_level": 0,
                "max_frame_average_light_level": 0,
            },
        }

        with open(config.directories.temp / "extra.json", "w") as f:
            json.dump(extra_metadata, f, indent=2)

        # Generate a DV RPU from the HDR10+ metadata
        conversion = subprocess.run(
            [
                str(DoviTool),
                "generate",
                "-j",
                str(config.directories.temp / "extra.json"),
                "--hdr10plus-json",
                str(config.directories.temp / self.hdr10plus_file),
                "-o",
                str(config.directories.temp / "RPU.bin"),
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        if conversion.returncode:
            raise ValueError("Failed converting HDR10+ to Dolby Vision")

        self.log.info("✓ HDR10+ successfully converted to Dolby Vision Profile 8")

        # Clean up temporary files
        Path.unlink(config.directories.temp / "extra.json")
        Path.unlink(config.directories.temp / self.hdr10plus_file)
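For orientation, the pipeline this class drives condenses to four external calls. The sketch below uses placeholder file names and assumes ffmpeg and dovi_tool are on PATH; it is not the exact class invocation:

    import subprocess

    # 1-2: extract raw HEVC streams from the downloaded tracks
    subprocess.run(["ffmpeg", "-nostdin", "-i", "hdr10.mkv", "-c:v", "copy", "HDR10.hevc", "-y"], check=True)
    subprocess.run(["ffmpeg", "-nostdin", "-i", "dv.mkv", "-c:v", "copy", "DV.hevc", "-y"], check=True)
    # 3: pull the RPU out of the Dolby Vision stream (mode 3, as the class uses)
    subprocess.run(["dovi_tool", "-m", "3", "extract-rpu", "DV.hevc", "-o", "RPU.bin"], check=True)
    # 4: inject the RPU into the HDR10 base layer
    subprocess.run(["dovi_tool", "inject-rpu", "-i", "HDR10.hevc", "--rpu-in", "RPU.bin", "-o", "HDR10-DV.hevc"], check=True)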
@@ -15,9 +15,11 @@ from construct import Container
 from pycaption import Caption, CaptionList, CaptionNode, WebVTTReader
 from pycaption.geometry import Layout
 from pymp4.parser import MP4
+from subby import CommonIssuesFixer, SAMIConverter, SDHStripper, WebVTTConverter
 from subtitle_filter import Subtitles

 from unshackle.core import binaries
 from unshackle.core.config import config
 from unshackle.core.tracks.track import Track
 from unshackle.core.utilities import try_ensure_utf8
 from unshackle.core.utils.webvtt import merge_segmented_webvtt

@@ -30,6 +32,7 @@ class Subtitle(Track):
        SubStationAlphav4 = "ASS"  # https://wikipedia.org/wiki/SubStation_Alpha#Advanced_SubStation_Alpha
        TimedTextMarkupLang = "TTML"  # https://wikipedia.org/wiki/Timed_Text_Markup_Language
        WebVTT = "VTT"  # https://wikipedia.org/wiki/WebVTT
+       SAMI = "SMI"  # https://wikipedia.org/wiki/SAMI
        # MPEG-DASH box-encapsulated subtitle formats
        fTTML = "STPP"  # https://www.w3.org/TR/2018/REC-ttml-imsc1.0.1-20180424
        fVTT = "WVTT"  # https://www.w3.org/TR/webvtt1

@@ -51,6 +54,8 @@ class Subtitle(Track):
            return Subtitle.Codec.TimedTextMarkupLang
        elif mime == "vtt":
            return Subtitle.Codec.WebVTT
+       elif mime in ("smi", "sami"):
+           return Subtitle.Codec.SAMI
        elif mime == "stpp":
            return Subtitle.Codec.fTTML
        elif mime == "wvtt":

@@ -306,10 +311,158 @@ class Subtitle(Track):

        return "\n".join(sanitized_lines)

    def convert_with_subby(self, codec: Subtitle.Codec) -> Path:
        """
        Convert subtitle using the subby library for better format support and processing.

        This method leverages subby's advanced subtitle processing capabilities,
        including better WebVTT handling, SDH stripping, and common-issue fixing.
        """
        if not self.path or not self.path.exists():
            raise ValueError("You must download the subtitle track first.")

        if self.codec == codec:
            return self.path

        output_path = self.path.with_suffix(f".{codec.value.lower()}")
        original_path = self.path

        try:
            # Convert to SRT using subby first
            srt_subtitles = None

            if self.codec == Subtitle.Codec.WebVTT:
                converter = WebVTTConverter()
                srt_subtitles = converter.from_file(str(self.path))
            elif self.codec == Subtitle.Codec.SAMI:
                converter = SAMIConverter()
                srt_subtitles = converter.from_file(str(self.path))

            if srt_subtitles is not None:
                # Apply common fixes
                fixer = CommonIssuesFixer()
                fixed_srt, _ = fixer.from_srt(srt_subtitles)

                # If the target is SRT, we're done
                if codec == Subtitle.Codec.SubRip:
                    output_path.write_text(str(fixed_srt), encoding="utf8")
                else:
                    # Convert from SRT to the target format using the existing pycaption logic
                    temp_srt_path = self.path.with_suffix(".temp.srt")
                    temp_srt_path.write_text(str(fixed_srt), encoding="utf8")

                    # Parse the SRT and convert to the target format
                    caption_set = self.parse(temp_srt_path.read_bytes(), Subtitle.Codec.SubRip)
                    self.merge_same_cues(caption_set)

                    writer = {
                        Subtitle.Codec.TimedTextMarkupLang: pycaption.DFXPWriter,
                        Subtitle.Codec.WebVTT: pycaption.WebVTTWriter,
                    }.get(codec)

                    if writer:
                        subtitle_text = writer().write(caption_set)
                        output_path.write_text(subtitle_text, encoding="utf8")
                    else:
                        # Fall back to the existing conversion method
                        temp_srt_path.unlink()
                        return self._convert_standard(codec)

                    temp_srt_path.unlink()

                if original_path.exists() and original_path != output_path:
                    original_path.unlink()

                self.path = output_path
                self.codec = codec

                if callable(self.OnConverted):
                    self.OnConverted(codec)

                return output_path
            else:
                # Fall back to the existing conversion method
                return self._convert_standard(codec)

        except Exception:
            # Fall back to the existing conversion method on any error
            return self._convert_standard(codec)

    def convert(self, codec: Subtitle.Codec) -> Path:
        """
        Convert this Subtitle to another Format.

        The conversion method is determined by the 'conversion_method' setting in config:
        - 'auto' (default): Uses subby for WebVTT/SAMI, standard for others
        - 'subby': Always uses subby with CommonIssuesFixer
        - 'subtitleedit': Uses SubtitleEdit when available, falls back to pycaption
        - 'pycaption': Uses only the pycaption library
        """
        # Check configuration for the conversion method
        conversion_method = config.subtitle.get("conversion_method", "auto")

        if conversion_method == "subby":
            return self.convert_with_subby(codec)
        elif conversion_method == "subtitleedit":
            return self._convert_standard(codec)  # SubtitleEdit is used in standard conversion
        elif conversion_method == "pycaption":
            return self._convert_pycaption_only(codec)
        elif conversion_method == "auto":
            # Use subby for formats it handles better
            if self.codec in (Subtitle.Codec.WebVTT, Subtitle.Codec.SAMI):
                return self.convert_with_subby(codec)
            else:
                return self._convert_standard(codec)
        else:
            return self._convert_standard(codec)

    def _convert_pycaption_only(self, codec: Subtitle.Codec) -> Path:
        """
        Convert subtitle using only the pycaption library (no SubtitleEdit, no subby).

        This is the original conversion method that only uses pycaption.
        """
        if not self.path or not self.path.exists():
            raise ValueError("You must download the subtitle track first.")

        if self.codec == codec:
            return self.path

        output_path = self.path.with_suffix(f".{codec.value.lower()}")
        original_path = self.path

        # Use only pycaption for conversion
        writer = {
            Subtitle.Codec.SubRip: pycaption.SRTWriter,
            Subtitle.Codec.TimedTextMarkupLang: pycaption.DFXPWriter,
            Subtitle.Codec.WebVTT: pycaption.WebVTTWriter,
        }.get(codec)

        if writer is None:
            raise NotImplementedError(f"Cannot convert {self.codec.name} to {codec.name} using pycaption only.")

        caption_set = self.parse(self.path.read_bytes(), self.codec)
        Subtitle.merge_same_cues(caption_set)
        subtitle_text = writer().write(caption_set)

        output_path.write_text(subtitle_text, encoding="utf8")

        if original_path.exists() and original_path != output_path:
            original_path.unlink()

        self.path = output_path
        self.codec = codec

        if callable(self.OnConverted):
            self.OnConverted(codec)

        return output_path

    def _convert_standard(self, codec: Subtitle.Codec) -> Path:
        """
        Convert this Subtitle to another Format.

        The file path location of the Subtitle data will be kept at the same
        location but the file extension will be changed appropriately.

@@ -318,6 +471,7 @@ class Subtitle(Track):
        - TimedTextMarkupLang - SubtitleEdit or pycaption.DFXPWriter
        - WebVTT - SubtitleEdit or pycaption.WebVTTWriter
        - SubStationAlphav4 - SubtitleEdit
+       - SAMI - subby.SAMIConverter (when available)
        - fTTML* - custom code using some pycaption functions
        - fVTT* - custom code using some pycaption functions
        *: Can read from format, but cannot convert to format

@@ -416,6 +570,13 @@ class Subtitle(Track):
            text = Subtitle.sanitize_broken_webvtt(text)
            text = Subtitle.space_webvtt_headers(text)
            caption_set = pycaption.WebVTTReader().read(text)
+       elif codec == Subtitle.Codec.SAMI:
+           # Use subby for SAMI parsing
+           converter = SAMIConverter()
+           srt_subtitles = converter.from_bytes(data)
+           # Convert SRT back to a CaptionSet for compatibility
+           srt_text = str(srt_subtitles).encode("utf8")
+           caption_set = Subtitle.parse(srt_text, Subtitle.Codec.SubRip)
        else:
            raise ValueError(f'Unknown Subtitle format "{codec}"...')
    except pycaption.exceptions.CaptionReadSyntaxError as e:

@@ -660,11 +821,45 @@ class Subtitle(Track):
    def strip_hearing_impaired(self) -> None:
        """
        Strip captions for the hearing impaired (SDH).
-       It uses SubtitleEdit if available, otherwise filter-subs.
+
+       The SDH stripping method is determined by the 'sdh_method' setting in config:
+       - 'auto' (default): Tries subby first, then SubtitleEdit, then filter-subs
+       - 'subby': Uses subby's SDHStripper
+       - 'subtitleedit': Uses SubtitleEdit when available
+       - 'filter-subs': Uses the subtitle-filter library
        """
        if not self.path or not self.path.exists():
            raise ValueError("You must download the subtitle track first.")

        # Check configuration for the SDH stripping method
        sdh_method = config.subtitle.get("sdh_method", "auto")

        if sdh_method == "subby" and self.codec == Subtitle.Codec.SubRip:
            # Use subby's SDHStripper directly on the file
            stripper = SDHStripper()
            stripped_srt, _ = stripper.from_file(str(self.path))
            self.path.write_text(str(stripped_srt), encoding="utf8")
            return
        elif sdh_method == "subtitleedit" and binaries.SubtitleEdit:
            # Force use of SubtitleEdit
            pass  # Continue to the SubtitleEdit section below
        elif sdh_method == "filter-subs":
            # Force use of filter-subs
            sub = Subtitles(self.path)
            sub.filter(rm_fonts=True, rm_ast=True, rm_music=True, rm_effects=True, rm_names=True, rm_author=True)
            sub.save()
            return
        elif sdh_method == "auto":
            # Try subby first for SRT files, then fall back
            if self.codec == Subtitle.Codec.SubRip:
                try:
                    stripper = SDHStripper()
                    stripped_srt, _ = stripper.from_file(str(self.path))
                    self.path.write_text(str(stripped_srt), encoding="utf8")
                    return
                except Exception:
                    pass  # Fall through to other methods

        if binaries.SubtitleEdit:
            if self.codec == Subtitle.Codec.SubStationAlphav4:
                output_format = "AdvancedSubStationAlpha"
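A minimal call sketch of the new dispatch, assuming `track` is a downloaded WebVTT Subtitle object; with `conversion_method: auto` in config, WebVTT routes through subby's converter and CommonIssuesFixer before the target file is written:

    from unshackle.core.tracks import Subtitle

    srt_path = track.convert(Subtitle.Codec.SubRip)  # track.path and track.codec now point at the SRT
    print(srt_path)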
@@ -11,6 +11,7 @@ from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeRemainingColumn
 from rich.table import Table
 from rich.tree import Tree

+from unshackle.core import binaries
 from unshackle.core.config import config
 from unshackle.core.console import console
 from unshackle.core.constants import LANGUAGE_MAX_DISTANCE, AnyTrack, TrackT

@@ -253,6 +254,31 @@ class Tracks:
    def select_subtitles(self, x: Callable[[Subtitle], bool]) -> None:
        self.subtitles = list(filter(x, self.subtitles))

    def select_hybrid(self, tracks, quality):
        hdr10_tracks = [
            v
            for v in tracks
            if v.range == Video.Range.HDR10 and (v.height in quality or int(v.width * 9 / 16) in quality)
        ]
        hdr10 = []
        for res in quality:
            candidates = [v for v in hdr10_tracks if v.height == res or int(v.width * 9 / 16) == res]
            if candidates:
                best = max(candidates, key=lambda v: v.bitrate)  # assumes .bitrate exists
                hdr10.append(best)

        dv_tracks = [v for v in tracks if v.range == Video.Range.DV]
        lowest_dv = min(dv_tracks, key=lambda v: v.height) if dv_tracks else None

        def select(x):
            if x in hdr10:
                return True
            if lowest_dv and x is lowest_dv:
                return True
            return False

        return select

    def by_resolutions(self, resolutions: list[int], per_resolution: int = 0) -> None:
        # Note: Do not merge these list comprehensions. They must be done separately so the results
        # from the 16:9 canvas check are only used if there's no exact height resolution match.

@@ -290,8 +316,11 @@ class Tracks:
        progress: Update a rich progress bar via `completed=...`. This must be the
            progress object's update() func, pre-set with task id via functools.partial.
        """
+       if not binaries.MKVToolNix:
+           raise RuntimeError("MKVToolNix (mkvmerge) is required for muxing but was not found")
+
        cl = [
-           "mkvmerge",
+           str(binaries.MKVToolNix),
            "--no-date",  # remove dates from the output for security
        ]

@@ -302,8 +331,9 @@ class Tracks:
        if not vt.path or not vt.path.exists():
            raise ValueError("Video Track must be downloaded before muxing...")
        events.emit(events.Types.TRACK_MULTIPLEX, track=vt)
-       cl.extend(
-           [
+
+       # Prepare base arguments
+       video_args = [
            "--language",
            f"0:{vt.language}",
            "--default-track",
@@ -312,12 +342,21 @@ class Tracks:
            f"0:{vt.is_original_lang}",
            "--compression",
            "0:none",  # disable extra compression
-           "(",
-           str(vt.path),
-           ")",
        ]

+       # Add FPS fix if needed (typically for hybrid mode to prevent sync issues)
+       if hasattr(vt, "needs_duration_fix") and vt.needs_duration_fix and vt.fps:
+           video_args.extend(
+               [
+                   "--default-duration",
+                   f"0:{vt.fps}fps" if isinstance(vt.fps, str) else f"0:{vt.fps:.3f}fps",
+                   "--fix-bitstream-timing-information",
+                   "0:1",
+               ]
+           )
+
+       cl.extend(video_args + ["(", str(vt.path), ")"])

        for i, at in enumerate(self.audio):
            if not at.path or not at.path.exists():
                raise ValueError("Audio Track must be downloaded before muxing...")

@@ -94,6 +94,7 @@ class Video(Track):
        HDR10 = "HDR10"  # https://en.wikipedia.org/wiki/HDR10
        HDR10P = "HDR10+"  # https://en.wikipedia.org/wiki/HDR10%2B
        DV = "DV"  # https://en.wikipedia.org/wiki/Dolby_Vision
+       HYBRID = "HYBRID"  # Selects both HDR10 and DV tracks for hybrid processing with DoviTool

    @staticmethod
    def from_cicp(primaries: int, transfer: int, matrix: int) -> Video.Range:
@@ -115,6 +116,7 @@ class Video(Track):
        class Transfer(Enum):
            Unspecified = 0
            BT_709 = 1
+           Unspecified_Image = 2
            BT_601 = 6
            BT_2020 = 14
            BT_2100 = 15
@@ -236,6 +238,8 @@ class Video(Track):
        except Exception as e:
            raise ValueError("Expected fps to be a number, float, or a string in numerator/denominator form, " + str(e))

+       self.needs_duration_fix = False

    def __str__(self) -> str:
        return " | ".join(
            filter(
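A sketch of applying the selector that select_hybrid returns, assuming `tracks` is a populated Tracks instance and 2160p is the target quality; it keeps the best-bitrate HDR10 track per requested resolution plus the single lowest DV track:

    selector = tracks.select_hybrid(tracks.videos, quality=[2160])
    tracks.videos = [v for v in tracks.videos if selector(v)]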
unshackle/core/update_checker.py (new file, 188 lines)
@@ -0,0 +1,188 @@
from __future__ import annotations

import asyncio
import json
import time
from pathlib import Path
from typing import Optional

import requests


class UpdateChecker:
    """Check for available updates from the GitHub repository."""

    REPO_URL = "https://api.github.com/repos/unshackle-dl/unshackle/releases/latest"
    TIMEOUT = 5
    DEFAULT_CHECK_INTERVAL = 24 * 60 * 60  # 24 hours in seconds

    @classmethod
    def _get_cache_file(cls) -> Path:
        """Get the path to the update check cache file."""
        from unshackle.core.config import config

        return config.directories.cache / "update_check.json"

    @classmethod
    def _should_check_for_updates(cls, check_interval: int = DEFAULT_CHECK_INTERVAL) -> bool:
        """
        Check if enough time has passed since the last update check.

        Args:
            check_interval: Time in seconds between checks (default: 24 hours)

        Returns:
            True if we should check for updates, False otherwise
        """
        cache_file = cls._get_cache_file()

        if not cache_file.exists():
            return True

        try:
            with open(cache_file, "r") as f:
                cache_data = json.load(f)

            last_check = cache_data.get("last_check", 0)
            current_time = time.time()

            return (current_time - last_check) >= check_interval

        except (json.JSONDecodeError, KeyError, OSError):
            # If the cache is corrupted or unreadable, allow the check
            return True

    @classmethod
    def _update_cache(cls, latest_version: Optional[str] = None) -> None:
        """
        Update the cache file with the current timestamp and latest version.

        Args:
            latest_version: The latest version found, if any
        """
        cache_file = cls._get_cache_file()

        try:
            # Ensure the cache directory exists
            cache_file.parent.mkdir(parents=True, exist_ok=True)

            cache_data = {"last_check": time.time(), "latest_version": latest_version}

            with open(cache_file, "w") as f:
                json.dump(cache_data, f)

        except (OSError, TypeError):
            # Silently fail if we can't write the cache
            pass

    @staticmethod
    def _compare_versions(current: str, latest: str) -> bool:
        """
        Simple semantic version comparison.

        Args:
            current: Current version string (e.g., "1.1.0")
            latest: Latest version string (e.g., "1.2.0")

        Returns:
            True if latest > current, False otherwise
        """
        try:
            current_parts = [int(x) for x in current.split(".")]
            latest_parts = [int(x) for x in latest.split(".")]

            max_length = max(len(current_parts), len(latest_parts))
            current_parts.extend([0] * (max_length - len(current_parts)))
            latest_parts.extend([0] * (max_length - len(latest_parts)))

            for current_part, latest_part in zip(current_parts, latest_parts):
                if latest_part > current_part:
                    return True
                elif latest_part < current_part:
                    return False

            return False
        except (ValueError, AttributeError):
            return False

    @classmethod
    async def check_for_updates(cls, current_version: str) -> Optional[str]:
        """
        Check if there's a newer version available on GitHub.

        Args:
            current_version: The current version string (e.g., "1.1.0")

        Returns:
            The latest version string if an update is available, None otherwise
        """
        try:
            loop = asyncio.get_event_loop()
            response = await loop.run_in_executor(None, lambda: requests.get(cls.REPO_URL, timeout=cls.TIMEOUT))

            if response.status_code != 200:
                return None

            data = response.json()
            latest_version = data.get("tag_name", "").lstrip("v")

            if not latest_version:
                return None

            if cls._compare_versions(current_version, latest_version):
                return latest_version

        except Exception:
            pass

        return None

    @classmethod
    def check_for_updates_sync(cls, current_version: str, check_interval: Optional[int] = None) -> Optional[str]:
        """
        Synchronous version of the update check with rate limiting.

        Args:
            current_version: The current version string (e.g., "1.1.0")
            check_interval: Time in seconds between checks (default: from config)

        Returns:
            The latest version string if an update is available, None otherwise
        """
        # Use the config value if not specified
        if check_interval is None:
            from unshackle.core.config import config

            check_interval = config.update_check_interval * 60 * 60  # Convert hours to seconds

        # Check if we should skip this check due to rate limiting
        if not cls._should_check_for_updates(check_interval):
            return None

        try:
            response = requests.get(cls.REPO_URL, timeout=cls.TIMEOUT)

            if response.status_code != 200:
                # Update the cache even on failure to prevent rapid retries
                cls._update_cache()
                return None

            data = response.json()
            latest_version = data.get("tag_name", "").lstrip("v")

            if not latest_version:
                cls._update_cache()
                return None

            # Update the cache with the latest version info
            cls._update_cache(latest_version)

            if cls._compare_versions(current_version, latest_version):
                return latest_version

        except Exception:
            # Update the cache even on exception to prevent rapid retries
            cls._update_cache()

        return None
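A minimal usage sketch of the checker; the version string is a placeholder and the call hits the GitHub API unless the cache says it already ran within the configured interval:

    from unshackle.core.update_checker import UpdateChecker

    assert UpdateChecker._compare_versions("1.1.0", "1.2.0")  # pure comparison, no network
    latest = UpdateChecker.check_for_updates_sync("1.2.0")
    if latest:
        print(f"Update available: v{latest}")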
@@ -3,7 +3,6 @@ from __future__ import annotations
 import logging
 import os
 import re
-import shutil
 import subprocess
 import tempfile
 from difflib import SequenceMatcher
@@ -12,6 +11,7 @@ from typing import Optional, Tuple

 import requests

+from unshackle.core import binaries
 from unshackle.core.config import config
 from unshackle.core.titles.episode import Episode
 from unshackle.core.titles.movie import Movie

@@ -175,8 +175,7 @@ def external_ids(tmdb_id: int, kind: str) -> dict:
 def _apply_tags(path: Path, tags: dict[str, str]) -> None:
    if not tags:
        return
-   mkvpropedit = shutil.which("mkvpropedit")
-   if not mkvpropedit:
+   if not binaries.Mkvpropedit:
        log.debug("mkvpropedit not found on PATH; skipping tags")
        return
    log.debug("Applying tags to %s: %s", path, tags)

@@ -189,7 +188,7 @@ def _apply_tags(path: Path, tags: dict[str, str]) -> None:
    tmp_path = Path(f.name)
    try:
        subprocess.run(
-           [mkvpropedit, str(path), "--tags", f"global:{tmp_path}"],
+           [str(binaries.Mkvpropedit), str(path), "--tags", f"global:{tmp_path}"],
            check=False,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
@@ -25,8 +25,20 @@ class Vaults:
    def __len__(self) -> int:
        return len(self.vaults)

-   def load(self, type_: str, **kwargs: Any) -> None:
-       """Load a Vault into the vaults list."""
+   def load(self, type_: str, **kwargs: Any) -> bool:
+       """Load a Vault into the vaults list. Returns True if successful, False otherwise."""
        module = _MODULES.get(type_)
        if not module:
            raise ValueError(f"Unable to find vault command by the name '{type_}'.")
+       try:
+           vault = module(**kwargs)
+           self.vaults.append(vault)
+           return True
+       except Exception:
+           return False
+
+   def load_critical(self, type_: str, **kwargs: Any) -> None:
+       """Load a critical Vault that must succeed or raise an exception."""
+       module = _MODULES.get(type_)
+       if not module:
+           raise ValueError(f"Unable to find vault command by the name '{type_}'.")
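A sketch of the new contract, with placeholder vault names and keyword arguments (illustrative, not a documented signature): load() now reports failure, while load_critical() still raises:

    if not vaults.load("HTTP", name="Main", host="https://vault.example.com", password="secret"):
        print("optional vault skipped")  # a bad host or credentials no longer aborts startup
    vaults.load_critical("HTTP", name="Primary", host="https://vault2.example.com", password="secret")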
@@ -16,7 +16,7 @@ from unshackle.core.manifests import DASH
 from unshackle.core.search_result import SearchResult
 from unshackle.core.service import Service
 from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
-from unshackle.core.tracks import Chapter, Subtitle, Tracks
+from unshackle.core.tracks import Chapter, Subtitle, Tracks, Video


 class EXAMPLE(Service):
@@ -49,6 +49,11 @@ class EXAMPLE(Service):
        self.title = title
        self.movie = movie
        self.device = device
+       self.cdm = ctx.obj.cdm
+
+       # Get range parameter for HDR support
+       range_param = ctx.parent.params.get("range_")
+       self.range = range_param[0].name if range_param else "SDR"

        if self.config is None:
            raise Exception("Config is missing!")
@@ -160,15 +165,54 @@ class EXAMPLE(Service):
        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
-       streams = self.session.post(
-           url=self.config["endpoints"]["streams"],
-           params={
+       # Handle HYBRID mode by fetching both HDR10 and DV tracks separately
+       if self.range == "HYBRID" and self.cdm.security_level != 3:
+           tracks = Tracks()
+
+           # Get HDR10 tracks
+           hdr10_tracks = self._get_tracks_for_range(title, "HDR10")
+           tracks.add(hdr10_tracks, warn_only=True)
+
+           # Get DV tracks
+           dv_tracks = self._get_tracks_for_range(title, "DV")
+           tracks.add(dv_tracks, warn_only=True)
+
+           return tracks
+       else:
+           # Normal single-range behavior
+           return self._get_tracks_for_range(title, self.range)
+
+   def _get_tracks_for_range(self, title: Title_T, range_override: str = None) -> Tracks:
+       # Use range_override if provided, otherwise use self.range
+       current_range = range_override if range_override else self.range
+
+       # Build API request parameters
+       params = {
            "token": self.token,
            "guid": title.id,
-       },
-       data={
+       }
+
+       data = {
            "type": self.config["client"][self.device]["type"],
-       },
+       }
+
+       # Add range-specific parameters
+       if current_range == "HDR10":
+           data["video_format"] = "hdr10"
+       elif current_range == "DV":
+           data["video_format"] = "dolby_vision"
+       else:
+           data["video_format"] = "sdr"
+
+       # Only request high-quality HDR content with an L1 CDM
+       if current_range in ("HDR10", "DV") and self.cdm.security_level == 3:
+           # L3 CDM - skip HDR content
+           return Tracks()
+
+       streams = self.session.post(
+           url=self.config["endpoints"]["streams"],
+           params=params,
+           data=data,
        ).json()["media"]

        self.license = {
@@ -182,6 +226,15 @@ class EXAMPLE(Service):
        self.log.debug(f"Manifest URL: {manifest_url}")
        tracks = DASH.from_url(url=manifest_url, session=self.session).to_tracks(language=title.language)

+       # Set range attributes on video tracks
+       for video in tracks.videos:
+           if current_range == "HDR10":
+               video.range = Video.Range.HDR10
+           elif current_range == "DV":
+               video.range = Video.Range.DV
+           else:
+               video.range = Video.Range.SDR

        # Remove DRM-free ("clear") audio tracks
        tracks.audio = [
            track for track in tracks.audio if "clear" not in track.data["dash"]["representation"].get("id")
@@ -4,14 +4,40 @@ tag: user_tag
# Set terminal background color (custom option not in CONFIG.md)
set_terminal_bg: false

# Set the file naming convention
# true for style - Prime.Suspect.S07E01.The.Final.Act.Part.One.1080p.ITV.WEB-DL.AAC2.0.H.264
# false for style - Prime Suspect S07E01 The Final Act - Part One
scene_naming: true

# Check for updates from the GitHub repository on startup (default: true)
update_checks: true

# How often to check for updates, in hours (default: 24)
update_check_interval: 24

# Muxing configuration
muxing:
  set_title: false

# Login credentials for each Service
credentials:
  # Direct credentials (no profile support)
  EXAMPLE: email@example.com:password
  EXAMPLE2: username:password

  # Per-profile credentials with default fallback
  SERVICE_NAME:
    default: default@email.com:password  # Used when no -p/--profile is specified
    profile1: user1@email.com:password1
    profile2: user2@email.com:password2

  # Per-profile credentials without default (requires -p/--profile)
  SERVICE_NAME2:
    john: john@example.com:johnspassword
    jane: jane@example.com:janespassword

  # You can also use list format for passwords with special characters
  SERVICE_NAME3:
    default: ["user@email.com", ":PasswordWith:Colons"]

# Override default directories used across unshackle
directories:
@@ -25,14 +51,25 @@ directories:
  prds: PRDs
  # Additional directories that can be configured:
  # commands: Commands
-  # services: Services
+  services:
+    - /path/to/services
+    - /other/path/to/services
  # vaults: Vaults
  # fonts: Fonts

# Pre-define which Widevine or PlayReady device to use for each Service
cdm:
  # Global default CDM device (fallback for all services/profiles)
  default: WVD_1
-  EXAMPLE: PRD_1

  # Direct service-specific CDM
  DIFFERENT_EXAMPLE: PRD_1

  # Per-profile CDM configuration
  EXAMPLE:
    john_sd: chromecdm_903_l3  # Profile 'john_sd' uses Chrome CDM L3
    jane_uhd: nexus_5_l1  # Profile 'jane_uhd' uses Nexus 5 L1
    default: generic_android_l3  # Default CDM for this service

# Use pywidevine Serve-compliant Remote CDMs
remote_cdm:
@@ -127,6 +164,15 @@ filenames:
# API key for The Movie Database (TMDB)
tmdb_api_key: ""

# conversion_method:
#   - auto (default): Smart routing - subby for WebVTT/SAMI, standard for others
#   - subby: Always use subby with advanced processing
#   - pycaption: Use only the pycaption library (no SubtitleEdit, no subby)
#   - subtitleedit: Prefer SubtitleEdit when available, fall back to pycaption
subtitle:
  conversion_method: auto
  sdh_method: auto

# Configuration for pywidevine's serve functionality
serve:
  users:
@@ -140,23 +186,48 @@ serve:
# Configuration data for each Service
services:
  # Service-specific configuration goes here
  # EXAMPLE:
  #   api_key: "service_specific_key"
  # Profile-specific configurations can be nested under service names

  # Legacy NordVPN configuration (use proxy_providers instead)
  nordvpn:
    username: ""
    password: ""
    servers:
      - us: 12

  # Example: with profile-specific device configs
  EXAMPLE:
    # Global service config
    api_key: "service_api_key"

    # Profile-specific device configurations
    profiles:
      john_sd:
        device:
          app_name: "AIV"
          device_model: "SHIELD Android TV"
      jane_uhd:
        device:
          app_name: "AIV"
          device_model: "Fire TV Stick 4K"

  # Example: Service with different regions per profile
  SERVICE_NAME:
    profiles:
      us_account:
        region: "US"
        api_endpoint: "https://api.us.service.com"
      uk_account:
        region: "GB"
        api_endpoint: "https://api.uk.service.com"

# External proxy provider services
proxy_providers:
  nordvpn:
    username: username_from_service_credentials
    password: password_from_service_credentials
-   servers:
+   server_map:
      - us: 12  # force US server #12 for US proxies
  surfsharkvpn:
    username: your_surfshark_service_username  # Service credentials from https://my.surfshark.com/vpn/manual-setup/main/openvpn
    password: your_surfshark_service_password  # Service credentials (not your login password)
    server_map:
      - us: 3844  # force US server #3844 for US proxies
      - gb: 2697  # force GB server #2697 for GB proxies
      - au: 4621  # force AU server #4621 for AU proxies
  basic:
    GB:
      - "socks5://username:password@bhx.socks.ipvanish.com:1080"  # 1 (Birmingham)
@@ -30,7 +30,7 @@ class HTTP(Vault):
|
||||
api_mode: "query" for query parameters or "json" for JSON API
|
||||
"""
|
||||
super().__init__(name)
|
||||
self.url = host.rstrip("/")
|
||||
self.url = host
|
||||
self.password = password
|
||||
self.username = username
|
||||
self.api_mode = api_mode.lower()
|
||||
@@ -88,21 +88,23 @@ class HTTP(Vault):
 
         if self.api_mode == "json":
             try:
                 title = getattr(self, "current_title", None)
-                response = self.request(
-                    "GetKey",
-                    {
+                params = {
                     "kid": kid,
                     "service": service.lower(),
                     "title": title,
-                    },
-                )
+                }
+
+                response = self.request("GetKey", params)
                 if response.get("status") == "not_found":
                     return None
                 keys = response.get("keys", [])
                 for key_entry in keys:
-                    if key_entry["kid"] == kid:
-                        return key_entry["key"]
+                    if isinstance(key_entry, str) and ":" in key_entry:
+                        entry_kid, entry_key = key_entry.split(":", 1)
+                        if entry_kid == kid:
+                            return entry_key
+                    elif isinstance(key_entry, dict):
+                        if key_entry.get("kid") == kid:
+                            return key_entry.get("key")
             except Exception as e:
                 print(f"Failed to get key ({e.__class__.__name__}: {e})")
                 return None
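The rewritten loop tolerates two shapes of `keys` entry: `kid:key` strings and `{kid, key}` dicts. A self-contained sketch of that logic with illustrative payloads (not a documented API contract):

```python
# Mirrors the key-matching logic above; payload values are made up.
def extract_key(response: dict, kid: str) -> str | None:
    for key_entry in response.get("keys", []):
        if isinstance(key_entry, str) and ":" in key_entry:
            entry_kid, entry_key = key_entry.split(":", 1)
            if entry_kid == kid:
                return entry_key
        elif isinstance(key_entry, dict) and key_entry.get("kid") == kid:
            return key_entry.get("key")
    return None

kid = "f1e2d3c4"
assert extract_key({"keys": [f"{kid}:aabbccdd"]}, kid) == "aabbccdd"
assert extract_key({"keys": [{"kid": kid, "key": "aabbccdd"}]}, kid) == "aabbccdd"
```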
uv.lock: 30 changes (generated)
@@ -1391,6 +1391,26 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" },
 ]
 
+[[package]]
+name = "srt"
+version = "3.5.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/66/b7/4a1bc231e0681ebf339337b0cd05b91dc6a0d701fa852bb812e244b7a030/srt-3.5.3.tar.gz", hash = "sha256:4884315043a4f0740fd1f878ed6caa376ac06d70e135f306a6dc44632eed0cc0", size = 28296, upload-time = "2023-03-28T02:35:44.007Z" }
+
+[[package]]
+name = "subby"
+version = "0.3.21"
+source = { git = "https://github.com/vevv/subby.git#390cb2f4a55e98057cdd65314d8cbffd5d0a11f1" }
+dependencies = [
+    { name = "beautifulsoup4" },
+    { name = "click" },
+    { name = "langcodes" },
+    { name = "lxml" },
+    { name = "pymp4" },
+    { name = "srt" },
+    { name = "tinycss" },
+]
+
 [[package]]
 name = "subtitle-filter"
 version = "1.5.0"

@@ -1400,6 +1420,12 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/10/40/c5d138e1f302b25240678943422a646feea52bab1f594c669c101c5e5070/subtitle_filter-1.5.0-py3-none-any.whl", hash = "sha256:6b506315be64870fba2e6894a70d76389407ce58c325fdf05129e0530f0a0f5b", size = 8346, upload-time = "2024-08-01T22:42:47.787Z" },
 ]
 
+[[package]]
+name = "tinycss"
+version = "0.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/05/59/af583fff6236c7d2f94f8175c40ce501dcefb8d1b42e4bb7a2622dff689e/tinycss-0.4.tar.gz", hash = "sha256:12306fb50e5e9e7eaeef84b802ed877488ba80e35c672867f548c0924a76716e", size = 87759, upload-time = "2016-09-23T16:30:14.894Z" }
+
 [[package]]
 name = "tomli"
 version = "2.2.1"

@@ -1479,7 +1505,7 @@ wheels = [
 
 [[package]]
 name = "unshackle"
-version = "1.0.1"
+version = "1.2.0"
 source = { editable = "." }
 dependencies = [
     { name = "appdirs" },

@@ -1510,6 +1536,7 @@ dependencies = [
     { name = "rlaphoenix-m3u8" },
     { name = "ruamel-yaml" },
     { name = "sortedcontainers" },
+    { name = "subby" },
     { name = "subtitle-filter" },
     { name = "unidecode" },
     { name = "urllib3" },

@@ -1558,6 +1585,7 @@ requires-dist = [
     { name = "rlaphoenix-m3u8", specifier = ">=3.4.0,<4" },
     { name = "ruamel-yaml", specifier = ">=0.18.6,<0.19" },
     { name = "sortedcontainers", specifier = ">=2.4.0,<3" },
+    { name = "subby", git = "https://github.com/vevv/subby.git" },
     { name = "subtitle-filter", specifier = ">=1.4.9,<2" },
     { name = "unidecode", specifier = ">=1.3.8,<2" },
     { name = "urllib3", specifier = ">=2.2.1,<3" },