66 Commits
1.0.1 ... 1.4.0

Author SHA1 Message Date
Sp5rky
1d4e8bf9ec Update CHANGELOG.md 2025-08-05 17:43:57 -06:00
Andy
b4a1f2236e feat: Bump version to 1.4.0 and update changelog with new features and fixes 2025-08-05 23:37:45 +00:00
Andy
3277ab0d77 feat(playready): Enhance KID extraction from PSSH with base64 support and XML parsing 2025-08-05 23:28:30 +00:00
Andy
be0f7299f8 style(dl): Standardize quotation marks for service attribute checks 2025-08-05 23:27:59 +00:00
Andy
948ef30de7 feat(dl): Add support for services that do not support subtitle downloads 2025-08-05 20:22:08 +00:00
Andy
1bd63ddc91 feat(titles): Better detection of DV across all codecs in Episode and Movie classes. dvhe.05.06 was not being detected correctly. 2025-08-05 18:33:51 +00:00
Andy
4dff597af2 feat(dl): Fix track selection to support combining -V, -A, -S flags
Previously, using multiple track selection flags like `-S -A` would not work
as expected. The flags were treated as mutually exclusive, resulting in only
one type of track being downloaded.

This change refactors the track selection logic to properly handle combinations:

- Multiple "only" flags now work together (e.g., `-S -A` downloads both)
- Exclusion flags (`--no-*`) continue to work and override selections
- Default behavior (no flags) remains unchanged

Fixes #10
2025-08-05 15:48:17 +00:00
Andy
8dbdde697d feat(hybrid): Enhance extraction and conversion processes with dynamic spinning bars to follow the rest of the codebase. 2025-08-05 14:57:51 +00:00
Andy
63c697f082 feat(series): Enhance tree representation with season breakdown 2025-08-04 19:30:27 +00:00
Andy
3e0835d9fb feat(dl): Improve DRM track decryption handling 2025-08-04 19:30:27 +00:00
Andy
c6c83ee43b feat(dl): Enhance language selection for video and audio tracks, including original language support 2025-08-04 19:30:27 +00:00
Andy
507690834b feat(tracks): Add support for HLG color transfer characteristics in video arguments 2025-08-04 19:28:11 +00:00
Andy
f8a58d966b feat(subtitle): Add filtering for unwanted cues in WebVTT subtitles 2025-08-03 22:10:17 +00:00
Andy
8d12b735ff feat(dl): Add option to include forced subtitle tracks 2025-08-03 22:00:21 +00:00
Andy
1aaea23669 Revert "feat: Implement terminal cleanup on exit and signal handling in ComfyConsole"
This reverts commit 091d7335a3.
2025-08-03 15:26:30 +00:00
Andy
e3571b9518 feat(update_checker): Enhance update checking logic and cache handling 2025-08-03 06:58:59 +00:00
Andy
b478a00519 chore: Bump unshackle version to 1.3.0 in uv.lock 2025-08-03 06:45:40 +00:00
Andy
24fb8fb00c chore: Update changelog with new features, enhancements, and fixes for version 1.3.0 2025-08-03 06:30:53 +00:00
Andy
63e9a78b2a chore: Bump version to 1.3.0 and update changelog with mp4decrypt support and enhancements 2025-08-03 06:26:24 +00:00
Andy
a2bfe47993 feat(drm): Add support for mp4decrypt as a decryption method
* Introduced a new configuration option for DRM decryption in `unshackle.yaml`.
* Updated the `decrypt` methods in `PlayReady` and `Widevine` classes to allow using `mp4decrypt`.
* Enhanced the `Config` class to manage decryption methods per service.
* Added `mp4decrypt` binary detection in the binaries module.
2025-08-03 06:23:43 +00:00
Andy
cf4dc1ce76 feat: Add unshackle-example.yaml to replace the unshackle.yaml file; you can now make changes to the unshackle.yaml file and pull from the repo without issues. 2025-08-03 00:54:29 +00:00
Andy
40028c81d7 Merge branch 'feature/scene-naming-option' 2025-08-03 00:48:22 +00:00
Andy
06df10cb58 fix: rename 'servers' to 'server_map' for proxy configuration in unshackle.yaml to resolve nord/surfshark incorrect named config 2025-08-01 20:23:03 +00:00
Andy
d61bec4a8c feat: Add scene naming option to configuration and update naming logic in titles 2025-08-01 18:40:40 +00:00
Andy
058bb60502 feat: update path of update_check.json to .gitignore 2025-08-01 17:44:11 +00:00
Andy
7583129e8f feat: Enhance credential management and CDM configuration in unshackle.yaml 2025-08-01 17:41:19 +00:00
Andy
4691694d2e feat: Add Unspecified_Image option to Transfer enum in Video class.
The Transfer enum was missing value 2, which according to ITU-T H.Sup19 standards represents "Unspecified (Image
  characteristics are unknown or are determined by the application)". This value is often used for still image coding systems.
2025-08-01 17:10:55 +00:00
Andy
a07345a0a2 refactor: Replace log.exit calls with ValueError exceptions for error handling in Hybrid class 2025-07-31 23:48:22 +00:00
Andy
091d7335a3 feat: Implement terminal cleanup on exit and signal handling in ComfyConsole 2025-07-31 18:25:18 +00:00
Andy
8c798b95c4 fix: Correct URL handling and improve key retrieval logic in HTTP vault 2025-07-31 15:45:12 +00:00
Andy
46c28fe943 feat: Add update check interval configuration and implement rate limiting for update checks 2025-07-30 23:36:59 +00:00
Andy
22c9aa195e feat: Bump version to 1.2.0 and update changelog, I'll eventually learn semantic versioning. 2025-07-30 23:15:20 +00:00
Andy
776d8f3df0 feat: Update version to 1.1.1 and add update checking functionality 2025-07-30 23:12:13 +00:00
Andy
67caf71295 Merge branch 'hdr10ptest' 2025-07-30 22:49:01 +00:00
Andy
3ed76d199c chore(workflow): 🗑️ Remove Docker build and publish workflow; it's too messy at the moment, so we're doing manual builds for now. 2025-07-30 22:48:00 +00:00
Andy
4de9251f95 feat(tracks): Add duration fix handling for video and hybrid tracks 2025-07-30 21:39:34 +00:00
Andy
d2fb409ad9 feat(hybrid): Add HDR10+ support for conversion to Dolby Vision and enhance metadata extraction 2025-07-30 21:14:50 +00:00
Andy
fdff3a1c56 refactor(env): Enhance dependency check with detailed categorization and status summary 2025-07-30 20:12:43 +00:00
Andy
5d1f2eb458 feat(attachment): Ensure temporary directory is created for downloads 2025-07-30 18:52:36 +00:00
Andy
3efac3d474 feat(vaults): Enhance vault loading with success status 2025-07-30 17:29:06 +00:00
Andy
f578904b76 feat(subtitle): Add information into unshackle.yaml on how to use new Subby subtitle conversion. 2025-07-30 02:18:35 +00:00
Andy
9f20159605 feat(hybrid): Display resolution of HDR10 track in hybrid mode console output and clean up unused code 2025-07-30 02:08:07 +00:00
Andy
4decb0d107 feat(dl): Enhance hybrid processing to handle HDR10 and DV tracks separately by resolution. Hotfix for -q 2160,1080: both tracks will now be marked Hybrid correctly. 2025-07-30 01:09:59 +00:00
Sp5rky
80c40c8677 Merge pull request #1 from unshackle-dl/Hybrid-HDR
Hybrid HDR
2025-07-29 20:40:24 -04:00
Andy
26ef48c889 fix(download): 🐛 Skip Content-Length validation for compressed responses in curl_impersonate and requests 2025-07-30 00:32:25 +00:00
Andy
5dad2746b1 feat(subtitles): Integrate subby library for enhanced subtitle processing and conversion methods 2025-07-30 00:24:55 +00:00
Andy
24aa4647ed chore: Add CHANGELOG.md to document notable changes and version history 2025-07-29 20:32:35 +00:00
Andy
eeb553cb22 chore: 🔖 Bump version to 1.1.0 in pyproject.toml, __init__.py, and uv.lock to follow correct Semantic Versioning. 2025-07-29 19:48:34 +00:00
Andy
06c96b88a5 fix(download): 🐛 Skip Content-Length validation for compressed responses in curl_impersonate and requests. The fix ensures that when Content-Encoding indicates compression, we skip the validation by setting content_length = 0, allowing the downloads to complete successfully. 2025-07-29 19:13:50 +00:00
Andy
e8e376ad51 fix(hybrid): 🐛 Fix import order and add missing json import
fix(uv): 🐛 Update unshackle package version to 1.0.2
2025-07-29 19:11:11 +00:00
Andy
fbb140ec90 feat(EXAMPLE): Add support for HDR10 and DV tracks in hybrid mode 2025-07-29 17:57:01 +00:00
Andy
16a684c77f fix(dl): 🐛 Check for dovi_tool availability in hybrid mode 2025-07-29 17:47:27 +00:00
Andy
c97de0c32b feat(hybrid): Implement HDR10+DV hybrid processing and injection support
Original code by @P0llUx12 - Discord
2025-07-29 17:40:02 +00:00
Andy
c81b7f192e fix(install): 🐛 Improve UV installation process and error handling
* Enhanced the installation script for `uv` by:
  * Adding checks for existing installations.
  * Improving error messages for PowerShell script execution.
  * Ensuring `uv` is correctly added to the PATH for the current session.
* Updated the installation confirmation messages for clarity.
2025-07-25 22:40:46 +00:00
Andy
1b9fbe3401 fix(env): 🐛 Improve handling of directory paths in info command
* Enhanced the `info` command to support both single `Path` objects and lists of `Path` objects.
* For lists, each path is now displayed on a separate line, improving readability.
* Maintained original logic for single `Path` objects to ensure consistent behavior.
2025-07-25 18:46:55 +00:00
Andy
f69eb691d7 feat(binaries): Add support for MKVToolNix and mkvpropedit
* Introduced `MKVToolNix` and `mkvpropedit` binaries to the project.
* Updated the environment check to include required status for dependencies.
* Enhanced the `Tracks` class to raise an error if `MKVToolNix` is not found.
* Modified the `_apply_tags` function to utilize the `mkvpropedit` binary from the binaries module.
2025-07-25 18:27:14 +00:00
Andy
05ef841282 fix(env): 🐛 Update Shaka-Packager binary retrieval method
* Changed the binary retrieval for `Shaka-Packager` to use `find_binary` for improved accuracy.
* This ensures the correct binary is located and used in the environment checks.
2025-07-25 18:18:00 +00:00
Andy
454f19a0f7 fix(env): 🐛 Update binary search functionality to use binaries.find
* Refactored the `find_binary` function to utilize `binaries.find` for improved binary detection.
* Updated dependency path retrieval to ensure accurate results.
2025-07-25 18:09:06 +00:00
Andy
4276267455 feat(proxies): Add SurfsharkVPN support
Original code by @p0llux12 - Discord

- Introduced `SurfsharkVPN` class for proxy service integration.
- Updated configuration to include `surfsharkvpn` in proxy providers.
- Removed legacy `nordvpn` configuration from YAML.
- Enhanced `dl.py` and `search.py` to utilize `SurfsharkVPN`.
2025-07-25 09:03:08 +00:00
Andy
ab40dc1bf0 Merge branch 'main' of https://github.com/unshackle-dl/unshackle 2025-07-25 08:32:27 +00:00
Andy
ec16e54c10 fix(binaries): 🐛 Improve local binary search functionality
* Added logic to check for executables in a local `binaries` directory.
* Enhanced Windows support by checking for `.exe` extensions.
* Removed unnecessary `binaries/` entry from `.gitignore`.
2025-07-25 08:32:26 +00:00
Sp5rky
20285f4522 Update issue templates 2025-07-20 20:59:48 -06:00
Andy
eaa5943b8e Include yaml updates showing how to use new multiple service folders 2025-07-20 16:51:38 +00:00
Andy
4385035b05 fix(cfg): 🐛 Update services directory handling
* Updated the `services` directory assignment to ensure it is always treated as a list, improving consistency in configuration handling. This allows providing multiple different service folders.
2025-07-20 16:49:44 +00:00
Andy
cb26ac6fa2 feat: Update version display in main.py
* Changed the version display in `__main__.py` to include copyright information.
2025-07-20 15:45:50 +00:00
Andy
95674d5739 Update readme with better instructions for docker usage with correct downloads path 2025-07-20 05:38:46 +00:00
44 changed files with 2388 additions and 440 deletions

32
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,32 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: Sp5rky
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Run command uv run [...]
2. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. Windows/Unix]
- Version [e.g. 1.0.1]
- Shaka-packager Version [e.g. 2.6.1]
- n_m3u8dl-re Version [e.g. 0.3.0 beta]
- Any additional software, such as subby/ccextractor/aria2c
**Additional context**
Add any other context about the problem here. If you're reporting issues with services not running or working, please try to explain where in your service it breaks, but don't include service code (unless you have the rights to do so).

View File

@@ -0,0 +1,21 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: Sp5rky
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
Other tools like Devine/VT had this function [...]
**Additional context**
Add any other context or screenshots about the feature request here.

View File

@@ -1,99 +0,0 @@
name: Build and Publish Docker Image
on:
push:
branches: [main, master]
paths: # run only when this file changed at all
- "unshackle/core/__init__.py"
pull_request: {} # optional delete if you dont build on PRs
workflow_dispatch: {} # manual override
jobs:
detect-version-change:
runs-on: ubuntu-latest
outputs:
changed: ${{ steps.vdiff.outputs.changed }}
version: ${{ steps.vdiff.outputs.version }}
steps:
- uses: actions/checkout@v4
with: { fetch-depth: 2 } # we need the previous commit :contentReference[oaicite:1]{index=1}
- name: Extract & compare version
id: vdiff
shell: bash
run: |
current=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
prev=$(git show HEAD^:unshackle/core/__init__.py \
| grep -oP '__version__ = "\K[^"]+' || echo '')
echo "version=$current" >>"$GITHUB_OUTPUT"
echo "changed=$([ "$current" != "$prev" ] && echo true || echo false)" >>"$GITHUB_OUTPUT"
echo "Current=$current Previous=$prev"
build-and-push:
needs: detect-version-change
if: needs.detect-version-change.outputs.changed == 'true' # only run when bumped :contentReference[oaicite:2]{index=2}
runs-on: ubuntu-latest
permissions: { contents: read, packages: write }
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Extract version from __init__.py
id: version
run: |
VERSION=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "major_minor=$(echo $VERSION | cut -d. -f1-2)" >> $GITHUB_OUTPUT
echo "major=$(echo $VERSION | cut -d. -f1)" >> $GITHUB_OUTPUT
echo "Extracted version: $VERSION"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest,enable={{is_default_branch}}
type=raw,value=v${{ steps.version.outputs.version }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.version }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.major_minor }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.major }},enable={{is_default_branch}}
- name: Show planned tags
run: |
echo "Planning to create the following tags:"
echo "${{ steps.meta.outputs.tags }}"
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Test Docker image
if: github.event_name != 'pull_request'
run: |
docker run --rm ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest env check

2
.gitignore vendored
View File

@@ -1,6 +1,7 @@
# unshackle # unshackle
unshackle.yaml unshackle.yaml
unshackle.yml unshackle.yml
update_check.json
*.mkv *.mkv
*.mp4 *.mp4
*.exe *.exe
@@ -18,7 +19,6 @@ device_cert
device_client_id_blob device_client_id_blob
device_private_key device_private_key
device_vmp_blob device_vmp_blob
binaries/
unshackle/cache/ unshackle/cache/
unshackle/cookies/ unshackle/cookies/
unshackle/certs/ unshackle/certs/

168
CHANGELOG.md Normal file
View File

@@ -0,0 +1,168 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.4.0] - 2025-08-05
### Added
- **HLG Transfer Characteristics Preservation**: Enhanced video muxing to preserve HLG color metadata
- Added automatic detection of HLG video tracks during muxing process
- Implemented `--color-transfer-characteristics 0:18` argument for mkvmerge when processing HLG content
- Prevents incorrect conversion from HLG (18) to BT.2020 (14) transfer characteristics
- Ensures proper HLG playback support on compatible hardware without manual editing
- **Original Language Support**: Enhanced language selection with 'orig' keyword support
- Added support for 'orig' language selector for both video and audio tracks
- Automatically detects and uses the title's original language when 'orig' is specified
- Improved language processing logic with better duplicate handling
- Enhanced help text to document original language selection usage
- **Forced Subtitle Support**: Added option to include forced subtitle tracks
- New functionality to download and include forced subtitle tracks alongside regular subtitles
- **WebVTT Subtitle Filtering**: Enhanced subtitle processing capabilities
- Added filtering for unwanted cues in WebVTT subtitles
- Improved subtitle quality by removing unnecessary metadata
### Changed
- **DRM Track Decryption**: Improved DRM decryption track selection logic
- Enhanced `get_drm_for_cdm()` method usage for better DRM-CDM matching
- Added warning messages when no matching DRM is found for tracks
- Improved error handling and logging for DRM decryption failures
- **Series Tree Representation**: Enhanced episode tree display formatting
- Updated series tree to show season breakdown with episode counts
- Improved visual representation with "S{season}({count})" format
- Better organization of series information in console output
- **Hybrid Processing UI**: Enhanced extraction and conversion processes
- Added dynamic spinning bars to follow the rest of the codebase design
- Improved visual feedback during hybrid HDR processing operations
- **Track Selection Logic**: Enhanced multi-track selection capabilities
- Fixed track selection to support combining -V, -A, -S flags properly
- Improved flexibility in selecting multiple track types simultaneously
- **Service Subtitle Support**: Added configuration for services without subtitle support
- Services can now indicate if they don't support subtitle downloads
- Prevents unnecessary subtitle download attempts for unsupported services
- **Update Checker**: Enhanced update checking logic and cache handling
- Improved rate limiting and caching mechanisms for update checks
- Better performance and reduced API calls to GitHub
### Fixed
- **PlayReady KID Extraction**: Enhanced KID extraction from PSSH data
- Added base64 support and XML parsing for better KID detection
- Fixed issue where only one KID was being extracted for certain services
- Improved multi-KID support for PlayReady protected content
- **Dolby Vision Detection**: Improved DV codec detection across all formats
- Fixed detection of dvhe.05.06 codec which was not being recognized correctly
- Enhanced detection logic in Episode and Movie title classes
- Better support for various Dolby Vision codec variants
## [1.3.0] - 2025-08-03
### Added
- **mp4decrypt Support**: Alternative DRM decryption method using mp4decrypt from Bento4
- Added `mp4decrypt` binary detection and support in binaries module
- New `decryption` configuration option in unshackle.yaml for service-specific decryption methods
- Enhanced PlayReady and Widevine DRM classes with mp4decrypt decryption support
- Service-specific decryption mapping allows choosing between `shaka` and `mp4decrypt` per service
- Improved error handling and progress reporting for mp4decrypt operations
- **Scene Naming Configuration**: New `scene_naming` option for controlling file naming conventions
- Added scene naming logic to movie, episode, and song title classes
- Configurable through unshackle.yaml to enable/disable scene naming standards
- **Terminal Cleanup and Signal Handling**: Enhanced console management
- Implemented proper terminal cleanup on application exit
- Added signal handling for graceful shutdown in ComfyConsole
- **Configuration Template**: New `unshackle-example.yaml` template file
- Replaced main `unshackle.yaml` with example template to prevent git conflicts
- Users can now modify their local config without affecting repository updates
- **Enhanced Credential Management**: Improved CDM and vault configuration
- Expanded credential management documentation in configuration
- Enhanced CDM configuration examples and guidelines
- **Video Transfer Standards**: Added `Unspecified_Image` option to Transfer enum
- Implements ITU-T H.Sup19 standard value 2 for image characteristics
- Supports still image coding systems and unknown transfer characteristics
- **Update Check Rate Limiting**: Enhanced update checking system
- Added configurable update check intervals to prevent excessive API calls
- Improved rate limiting for GitHub API requests
### Changed
- **DRM Decryption Architecture**: Enhanced decryption system with dual method support
- Updated `dl.py` to handle service-specific decryption method selection
- Refactored `Config` class to manage decryption method mapping per service
- Enhanced DRM decrypt methods with `use_mp4decrypt` parameter for method selection
- **Error Handling**: Improved exception handling in Hybrid class
- Replaced log.exit calls with ValueError exceptions for better error propagation
- Enhanced error handling consistency across hybrid processing
### Fixed
- **Proxy Configuration**: Fixed proxy server mapping in configuration
- Renamed 'servers' to 'server_map' in proxy configuration to resolve Nord/Surfshark naming conflicts
- Updated configuration structure for better compatibility with proxy providers
- **HTTP Vault**: Improved URL handling and key retrieval logic
- Fixed URL processing issues in HTTP-based key vaults
- Enhanced key retrieval reliability and error handling
## [1.2.0] - 2025-07-30
### Added
- **Update Checker**: Automatic GitHub release version checking on startup
- Configurable update notifications via `update_checks` setting in unshackle.yaml
- Non-blocking HTTP requests with 5-second timeout for performance
- Smart semantic version comparison supporting all version formats (x.y.z, x.y, x)
- Graceful error handling for network issues and API failures
- User-friendly update notifications with current → latest version display
- Direct links to GitHub releases page for easy updates
- **HDR10+ Support**: Enhanced HDR10+ metadata processing for hybrid tracks
- HDR10+ tool binary support (`hdr10plus_tool`) added to binaries module
- HDR10+ to Dolby Vision conversion capabilities in hybrid processing
- Enhanced metadata extraction for HDR10+ content
- **Duration Fix Handling**: Added duration correction for video and hybrid tracks
- **Temporary Directory Management**: Automatic creation of temp directories for attachment downloads
### Changed
- Enhanced configuration system with new `update_checks` boolean option (defaults to true)
- Updated sample unshackle.yaml with update checker configuration documentation
- Improved console styling consistency using `bright_black` for dimmed text
- **Environment Dependency Check**: Complete overhaul with detailed categorization and status summary
- Organized dependencies by category (Core, HDR, Download, Subtitle, Player, Network)
- Enhanced status reporting with compact summary display
- Improved tool requirement tracking and missing dependency alerts
- **Hybrid Track Processing**: Significant improvements to HDR10+ and Dolby Vision handling
- Enhanced metadata extraction and processing workflows
- Better integration with HDR processing tools
### Removed
- **Docker Workflow**: Removed Docker build and publish GitHub Actions workflow for manual builds
## [1.1.0] - 2025-07-29
### Added
- **HDR10+DV Hybrid Processing**: New `-r HYBRID` command for processing HDR10 and Dolby Vision tracks
- Support for hybrid HDR processing and injection using dovi_tool
- New hybrid track processing module for seamless HDR10/DV conversion
- Automatic detection and handling of HDR10 and DV metadata
- Support for HDR10 and DV tracks in hybrid mode for EXAMPLE service
- Binary availability check for dovi_tool in hybrid mode operations
- Enhanced track processing capabilities for HDR content
### Fixed
- Import order issues and missing json import in hybrid processing
- UV installation process and error handling improvements
- Binary search functionality updated to use `binaries.find`
### Changed
- Updated package version from 1.0.2 to 1.1.0
- Enhanced dl.py command processing for hybrid mode support
- Improved core titles (episode/movie) processing for HDR content
- Extended tracks module with hybrid processing capabilities

View File

@@ -213,6 +213,37 @@ downloader:
The `default` entry is optional. If omitted, `requests` will be used for services not listed. The `default` entry is optional. If omitted, `requests` will be used for services not listed.
## decryption (str | dict)
Choose what software to use to decrypt DRM-protected content throughout unshackle where needed.
You may provide a single decryption method globally or a mapping of service tags to
decryption methods.
Options:
- `shaka` (default) - Shaka Packager - <https://github.com/shaka-project/shaka-packager>
- `mp4decrypt` - mp4decrypt from Bento4 - <https://github.com/axiomatic-systems/Bento4>
Note that Shaka Packager is the traditional method and works with most services. mp4decrypt
is an alternative that may work better with certain services that have specific encryption formats.
Example mapping:
```yaml
decryption:
ATVP: mp4decrypt
AMZN: shaka
default: shaka
```
The `default` entry is optional. If omitted, `shaka` will be used for services not listed.
Simple configuration (single method for all services):
```yaml
decryption: mp4decrypt
```
## filenames (dict) ## filenames (dict)
Override the default filenames used across unshackle. Override the default filenames used across unshackle.

View File

@@ -75,4 +75,4 @@ RUN uv sync --frozen --no-dev
# Set entrypoint to allow passing commands directly to unshackle # Set entrypoint to allow passing commands directly to unshackle
ENTRYPOINT ["uv", "run", "unshackle"] ENTRYPOINT ["uv", "run", "unshackle"]
CMD ["-h"] CMD ["-h"]

View File

@@ -14,6 +14,7 @@ unshackle is a fork of [Devine](https://github.com/devine-dl/devine/), a powerfu
- 🎥 **Multi-Media Support** - Movies, TV episodes, and music - 🎥 **Multi-Media Support** - Movies, TV episodes, and music
- 🛠️ **Built-in Parsers** - DASH/HLS and ISM manifest support - 🛠️ **Built-in Parsers** - DASH/HLS and ISM manifest support
- 🔒 **DRM Support** - Widevine and PlayReady integration - 🔒 **DRM Support** - Widevine and PlayReady integration
- 🌈 **HDR10+DV Hybrid** - Hybrid Dolby Vision injection via [dovi_tool](https://github.com/quietvoid/dovi_tool)
- 💾 **Flexible Storage** - Local and remote key vaults - 💾 **Flexible Storage** - Local and remote key vaults
- 👥 **Multi-Profile Auth** - Support for cookies and credentials - 👥 **Multi-Profile Auth** - Support for cookies and credentials
- 🤖 **Smart Naming** - Automatic P2P-style filename structure - 🤖 **Smart Naming** - Automatic P2P-style filename structure
@@ -54,12 +55,11 @@ docker run --rm ghcr.io/unshackle-dl/unshackle:latest env check
# Download content (mount directories for persistent data) # Download content (mount directories for persistent data)
docker run --rm \ docker run --rm \
-v "$(pwd)/downloads:/downloads" \ -v "$(pwd)/unshackle/downloads:/app/downloads" \
-v "$(pwd)/unshackle/cookies:/app/unshackle/cookies" \ -v "$(pwd)/unshackle/cookies:/app/unshackle/cookies" \
-v "$(pwd)/unshackle/services:/app/unshackle/services" \ -v "$(pwd)/unshackle/services:/app/unshackle/services" \
-v "$(pwd)/unshackle/WVDs:/app/unshackle/WVDs" \ -v "$(pwd)/unshackle/WVDs:/app/unshackle/WVDs" \
-v "$(pwd)/unshackle/PRDs:/app/unshackle/PRDs" \ -v "$(pwd)/unshackle/PRDs:/app/unshackle/PRDs" \
-v "$(pwd)/temp:/app/temp" \
-v "$(pwd)/unshackle/unshackle.yaml:/app/unshackle.yaml" \ -v "$(pwd)/unshackle/unshackle.yaml:/app/unshackle.yaml" \
ghcr.io/unshackle-dl/unshackle:latest dl SERVICE_NAME CONTENT_ID ghcr.io/unshackle-dl/unshackle:latest dl SERVICE_NAME CONTENT_ID
@@ -88,7 +88,6 @@ docker run --rm unshackle env check
## Planned Features ## Planned Features
- 🌈 **HDR10+DV Hybrid Support** - Allow support for hybrid HDR10+ and Dolby Vision.
- 🖥️ **Web UI Access & Control** - Manage and control unshackle from a modern web interface. - 🖥️ **Web UI Access & Control** - Manage and control unshackle from a modern web interface.
- 🔄 **Sonarr/Radarr Interactivity** - Direct integration for automated personal downloads. - 🔄 **Sonarr/Radarr Interactivity** - Direct integration for automated personal downloads.
- ⚙️ **Better ISM Support** - Improve on ISM support for multiple services - ⚙️ **Better ISM Support** - Improve on ISM support for multiple services

View File

@@ -1,47 +1,61 @@
@echo off @echo off
echo Installing unshackle dependencies... setlocal EnableExtensions EnableDelayedExpansion
echo.
echo === Unshackle setup (Windows) ===
echo. echo.
REM Check if UV is already installed where uv >nul 2>&1
uv --version >nul 2>&1
if %errorlevel% equ 0 ( if %errorlevel% equ 0 (
echo UV is already installed. echo [OK] uv is already installed.
goto install_deps goto install_deps
) )
echo UV not found. Installing UV... echo [..] uv not found. Installing...
echo.
REM Install UV using the official installer powershell -NoProfile -ExecutionPolicy Bypass -Command "irm https://astral.sh/uv/install.ps1 | iex"
powershell -Command "irm https://astral.sh/uv/install.ps1 | iex"
if %errorlevel% neq 0 ( if %errorlevel% neq 0 (
echo Failed to install UV. Please install UV manually from https://docs.astral.sh/uv/getting-started/installation/ echo [ERR] Failed to install uv.
echo PowerShell may be blocking scripts. Try:
echo Set-ExecutionPolicy RemoteSigned -Scope CurrentUser
echo or install manually: https://docs.astral.sh/uv/getting-started/installation/
pause pause
exit /b 1 exit /b 1
) )
REM Add UV to PATH for current session set "UV_BIN="
set "PATH=%USERPROFILE%\.cargo\bin;%PATH%" for %%D in ("%USERPROFILE%\.local\bin" "%LOCALAPPDATA%\Programs\uv\bin" "%USERPROFILE%\.cargo\bin") do (
if exist "%%~fD\uv.exe" set "UV_BIN=%%~fD"
)
echo UV installed successfully. if not defined UV_BIN (
echo. echo [WARN] Could not locate uv.exe. You may need to reopen your terminal.
) else (
set "PATH=%UV_BIN%;%PATH%"
)
:: Verify
uv --version >nul 2>&1
if %errorlevel% neq 0 (
echo [ERR] uv still not reachable in this shell. Open a new terminal and re-run this script.
pause
exit /b 1
)
echo [OK] uv installed and reachable.
:install_deps :install_deps
echo Installing project dependencies in editable mode with dev dependencies...
echo. echo.
REM Install the project in editable mode with dev dependencies
uv sync uv sync
if %errorlevel% neq 0 ( if %errorlevel% neq 0 (
echo Failed to install dependencies. Please check the error messages above. echo [ERR] Dependency install failed. See errors above.
pause pause
exit /b 1 exit /b 1
) )
echo. echo.
echo Installation completed successfully! echo Installation completed successfully!
echo. echo Try:
echo You can now run unshackle using:
echo uv run unshackle --help echo uv run unshackle --help
echo. echo.
pause pause
endlocal

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project] [project]
name = "unshackle" name = "unshackle"
version = "1.0.1" version = "1.4.0"
description = "Modular Movie, TV, and Music Archival Software." description = "Modular Movie, TV, and Music Archival Software."
authors = [{ name = "unshackle team" }] authors = [{ name = "unshackle team" }]
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
@@ -57,6 +57,7 @@ dependencies = [
"pyplayready>=0.6.0,<0.7", "pyplayready>=0.6.0,<0.7",
"httpx>=0.28.1,<0.29", "httpx>=0.28.1,<0.29",
"cryptography>=45.0.0", "cryptography>=45.0.0",
"subby",
] ]
[project.urls] [project.urls]
@@ -112,3 +113,4 @@ no_implicit_optional = true
[tool.uv.sources] [tool.uv.sources]
unshackle = { workspace = true } unshackle = { workspace = true }
subby = { git = "https://github.com/vevv/subby.git" }

View File

View File

@@ -65,7 +65,7 @@ def cfg(ctx: click.Context, key: str, value: str, unset: bool, list_: bool) -> N
if not is_write and not is_delete: if not is_write and not is_delete:
data = data.mlget(key_items, default=KeyError) data = data.mlget(key_items, default=KeyError)
if data == KeyError: if data is KeyError:
raise click.ClickException(f"Key '{key}' does not exist in the config.") raise click.ClickException(f"Key '{key}' does not exist in the config.")
yaml.dump(data, sys.stdout) yaml.dump(data, sys.stdout)
else: else:

View File

@@ -48,13 +48,14 @@ from unshackle.core.constants import DOWNLOAD_LICENCE_ONLY, AnyTrack, context_se
from unshackle.core.credential import Credential from unshackle.core.credential import Credential
from unshackle.core.drm import DRM_T, PlayReady, Widevine from unshackle.core.drm import DRM_T, PlayReady, Widevine
from unshackle.core.events import events from unshackle.core.events import events
from unshackle.core.proxies import Basic, Hola, NordVPN from unshackle.core.proxies import Basic, Hola, NordVPN, SurfsharkVPN
from unshackle.core.service import Service from unshackle.core.service import Service
from unshackle.core.services import Services from unshackle.core.services import Services
from unshackle.core.titles import Movie, Movies, Series, Song, Title_T from unshackle.core.titles import Movie, Movies, Series, Song, Title_T
from unshackle.core.titles.episode import Episode from unshackle.core.titles.episode import Episode
from unshackle.core.tracks import Audio, Subtitle, Tracks, Video from unshackle.core.tracks import Audio, Subtitle, Tracks, Video
from unshackle.core.tracks.attachment import Attachment from unshackle.core.tracks.attachment import Attachment
from unshackle.core.tracks.hybrid import Hybrid
from unshackle.core.utilities import get_system_fonts, is_close_match, time_elapsed_since from unshackle.core.utilities import get_system_fonts, is_close_match, time_elapsed_since
from unshackle.core.utils import tags from unshackle.core.utils import tags
from unshackle.core.utils.click_types import (LANGUAGE_RANGE, QUALITY_LIST, SEASON_RANGE, ContextData, MultipleChoice, from unshackle.core.utils.click_types import (LANGUAGE_RANGE, QUALITY_LIST, SEASON_RANGE, ContextData, MultipleChoice,
@@ -138,7 +139,13 @@ class dl:
default=None, default=None,
help="Wanted episodes, e.g. `S01-S05,S07`, `S01E01-S02E03`, `S02-S02E03`, e.t.c, defaults to all.", help="Wanted episodes, e.g. `S01-S05,S07`, `S01E01-S02E03`, `S02-S02E03`, e.t.c, defaults to all.",
) )
@click.option("-l", "--lang", type=LANGUAGE_RANGE, default="en", help="Language wanted for Video and Audio.") @click.option(
"-l",
"--lang",
type=LANGUAGE_RANGE,
default="en",
help="Language wanted for Video and Audio. Use 'orig' to select the original language, e.g. 'orig,en' for both original and English.",
)
@click.option( @click.option(
"-vl", "-vl",
"--v-lang", "--v-lang",
@@ -147,6 +154,7 @@ class dl:
help="Language wanted for Video, you would use this if the video language doesn't match the audio.", help="Language wanted for Video, you would use this if the video language doesn't match the audio.",
) )
@click.option("-sl", "--s-lang", type=LANGUAGE_RANGE, default=["all"], help="Language wanted for Subtitles.") @click.option("-sl", "--s-lang", type=LANGUAGE_RANGE, default=["all"], help="Language wanted for Subtitles.")
@click.option("-fs", "--forced-subs", is_flag=True, default=False, help="Include forced subtitle tracks.")
@click.option( @click.option(
"--proxy", "--proxy",
type=str, type=str,
@@ -294,11 +302,41 @@ class dl:
with console.status("Loading Key Vaults...", spinner="dots"): with console.status("Loading Key Vaults...", spinner="dots"):
self.vaults = Vaults(self.service) self.vaults = Vaults(self.service)
total_vaults = len(config.key_vaults)
failed_vaults = []
for vault in config.key_vaults: for vault in config.key_vaults:
vault_type = vault["type"] vault_type = vault["type"]
del vault["type"] vault_name = vault.get("name", vault_type)
self.vaults.load(vault_type, **vault) vault_copy = vault.copy()
self.log.info(f"Loaded {len(self.vaults)} Vaults") del vault_copy["type"]
if vault_type.lower() == "sqlite":
try:
self.vaults.load_critical(vault_type, **vault_copy)
self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")
except Exception as e:
self.log.error(f"vault failure: {vault_name} ({vault_type}) - {e}")
raise
else:
# Other vaults (MySQL, HTTP, API) - soft fail
if not self.vaults.load(vault_type, **vault_copy):
failed_vaults.append(vault_name)
self.log.debug(f"Failed to load vault: {vault_name} ({vault_type})")
else:
self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")
loaded_count = len(self.vaults)
if failed_vaults:
self.log.warning(f"Failed to load {len(failed_vaults)} vault(s): {', '.join(failed_vaults)}")
self.log.info(f"Loaded {loaded_count}/{total_vaults} Vaults")
# Debug: Show detailed vault status
if loaded_count > 0:
vault_names = [vault.name for vault in self.vaults]
self.log.debug(f"Active vaults: {', '.join(vault_names)}")
else:
self.log.debug("No vaults are currently active")
self.proxy_providers = [] self.proxy_providers = []
if no_proxy: if no_proxy:
@@ -309,6 +347,8 @@ class dl:
self.proxy_providers.append(Basic(**config.proxy_providers["basic"])) self.proxy_providers.append(Basic(**config.proxy_providers["basic"]))
if config.proxy_providers.get("nordvpn"): if config.proxy_providers.get("nordvpn"):
self.proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"])) self.proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"]))
if config.proxy_providers.get("surfsharkvpn"):
self.proxy_providers.append(SurfsharkVPN(**config.proxy_providers["surfsharkvpn"]))
if binaries.HolaProxy: if binaries.HolaProxy:
self.proxy_providers.append(Hola()) self.proxy_providers.append(Hola())
for proxy_provider in self.proxy_providers: for proxy_provider in self.proxy_providers:
@@ -372,6 +412,7 @@ class dl:
lang: list[str], lang: list[str],
v_lang: list[str], v_lang: list[str],
s_lang: list[str], s_lang: list[str],
forced_subs: bool,
sub_format: Optional[Subtitle.Codec], sub_format: Optional[Subtitle.Codec],
video_only: bool, video_only: bool,
audio_only: bool, audio_only: bool,
@@ -397,6 +438,15 @@ class dl:
self.tmdb_searched = False self.tmdb_searched = False
start_time = time.time() start_time = time.time()
# Check if dovi_tool is available when hybrid mode is requested
if any(r == Video.Range.HYBRID for r in range_):
from unshackle.core.binaries import DoviTool
if not DoviTool:
self.log.error("Unable to run hybrid mode: dovi_tool not detected")
self.log.error("Please install dovi_tool from https://github.com/quietvoid/dovi_tool")
sys.exit(1)
if cdm_only is None: if cdm_only is None:
vaults_only = None vaults_only = None
else: else:
@@ -491,7 +541,12 @@ class dl:
events.subscribe(events.Types.TRACK_REPACKED, service.on_track_repacked) events.subscribe(events.Types.TRACK_REPACKED, service.on_track_repacked)
events.subscribe(events.Types.TRACK_MULTIPLEX, service.on_track_multiplex) events.subscribe(events.Types.TRACK_MULTIPLEX, service.on_track_multiplex)
if no_subs: if hasattr(service, "NO_SUBTITLES") and service.NO_SUBTITLES:
console.log("Skipping subtitles - service does not support subtitle downloads")
no_subs = True
s_lang = None
title.tracks.subtitles = []
elif no_subs:
console.log("Skipped subtitles as --no-subs was used...") console.log("Skipped subtitles as --no-subs was used...")
s_lang = None s_lang = None
title.tracks.subtitles = [] title.tracks.subtitles = []
@@ -518,8 +573,31 @@ class dl:
) )
with console.status("Sorting tracks by language and bitrate...", spinner="dots"): with console.status("Sorting tracks by language and bitrate...", spinner="dots"):
title.tracks.sort_videos(by_language=v_lang or lang) video_sort_lang = v_lang or lang
title.tracks.sort_audio(by_language=lang) processed_video_sort_lang = []
for language in video_sort_lang:
if language == "orig":
if title.language:
orig_lang = str(title.language) if hasattr(title.language, "__str__") else title.language
if orig_lang not in processed_video_sort_lang:
processed_video_sort_lang.append(orig_lang)
else:
if language not in processed_video_sort_lang:
processed_video_sort_lang.append(language)
processed_audio_sort_lang = []
for language in lang:
if language == "orig":
if title.language:
orig_lang = str(title.language) if hasattr(title.language, "__str__") else title.language
if orig_lang not in processed_audio_sort_lang:
processed_audio_sort_lang.append(orig_lang)
else:
if language not in processed_audio_sort_lang:
processed_audio_sort_lang.append(language)
title.tracks.sort_videos(by_language=processed_video_sort_lang)
title.tracks.sort_audio(by_language=processed_audio_sort_lang)
title.tracks.sort_subtitles(by_language=s_lang) title.tracks.sort_subtitles(by_language=s_lang)
if list_: if list_:
@@ -537,10 +615,12 @@ class dl:
sys.exit(1) sys.exit(1)
if range_: if range_:
title.tracks.select_video(lambda x: x.range in range_) # Special handling for HYBRID - don't filter, keep all HDR10 and DV tracks
missing_ranges = [r for r in range_ if not any(x.range == r for x in title.tracks.videos)] if Video.Range.HYBRID not in range_:
for color_range in missing_ranges: title.tracks.select_video(lambda x: x.range in range_)
self.log.warning(f"Skipping {color_range.name} video tracks as none are available.") missing_ranges = [r for r in range_ if not any(x.range == r for x in title.tracks.videos)]
for color_range in missing_ranges:
self.log.warning(f"Skipping {color_range.name} video tracks as none are available.")
if vbitrate: if vbitrate:
title.tracks.select_video(lambda x: x.bitrate and x.bitrate // 1000 == vbitrate) title.tracks.select_video(lambda x: x.bitrate and x.bitrate // 1000 == vbitrate)
@@ -548,47 +628,84 @@ class dl:
self.log.error(f"There's no {vbitrate}kbps Video Track...") self.log.error(f"There's no {vbitrate}kbps Video Track...")
sys.exit(1) sys.exit(1)
# Filter out "best" from the video languages list.
video_languages = [lang for lang in (v_lang or lang) if lang != "best"] video_languages = [lang for lang in (v_lang or lang) if lang != "best"]
if video_languages and "all" not in video_languages: if video_languages and "all" not in video_languages:
title.tracks.videos = title.tracks.by_language(title.tracks.videos, video_languages) processed_video_lang = []
for language in video_languages:
if language == "orig":
if title.language:
orig_lang = (
str(title.language) if hasattr(title.language, "__str__") else title.language
)
if orig_lang not in processed_video_lang:
processed_video_lang.append(orig_lang)
else:
self.log.warning(
"Original language not available for title, skipping 'orig' selection for video"
)
else:
if language not in processed_video_lang:
processed_video_lang.append(language)
title.tracks.videos = title.tracks.by_language(title.tracks.videos, processed_video_lang)
if not title.tracks.videos: if not title.tracks.videos:
self.log.error(f"There's no {video_languages} Video Track...") self.log.error(f"There's no {processed_video_lang} Video Track...")
sys.exit(1) sys.exit(1)
if quality: if quality:
title.tracks.by_resolutions(quality)
missing_resolutions = [] missing_resolutions = []
for resolution in quality: if any(r == Video.Range.HYBRID for r in range_):
if any(video.height == resolution for video in title.tracks.videos): title.tracks.select_video(title.tracks.select_hybrid(title.tracks.videos, quality))
continue else:
if any(int(video.width * (9 / 16)) == resolution for video in title.tracks.videos): title.tracks.by_resolutions(quality)
continue
missing_resolutions.append(resolution) for resolution in quality:
if any(v.height == resolution for v in title.tracks.videos):
continue
if any(int(v.width * 9 / 16) == resolution for v in title.tracks.videos):
continue
missing_resolutions.append(resolution)
if missing_resolutions: if missing_resolutions:
res_list = "" res_list = ""
if len(missing_resolutions) > 1: if len(missing_resolutions) > 1:
res_list = (", ".join([f"{x}p" for x in missing_resolutions[:-1]])) + " or " res_list = ", ".join([f"{x}p" for x in missing_resolutions[:-1]]) + " or "
res_list = f"{res_list}{missing_resolutions[-1]}p" res_list = f"{res_list}{missing_resolutions[-1]}p"
plural = "s" if len(missing_resolutions) > 1 else "" plural = "s" if len(missing_resolutions) > 1 else ""
self.log.error(f"There's no {res_list} Video Track{plural}...") self.log.error(f"There's no {res_list} Video Track{plural}...")
sys.exit(1) sys.exit(1)
# choose best track by range and quality # choose best track by range and quality
selected_videos: list[Video] = [] if any(r == Video.Range.HYBRID for r in range_):
for resolution, color_range in product(quality or [None], range_ or [None]): # For hybrid mode, always apply hybrid selection
match = next( # If no quality specified, use only the best (highest) resolution
( if not quality:
t # Get the highest resolution available
for t in title.tracks.videos best_resolution = max((v.height for v in title.tracks.videos), default=None)
if (not resolution or t.height == resolution or int(t.width * (9 / 16)) == resolution) if best_resolution:
and (not color_range or t.range == color_range) # Use the hybrid selection logic with only the best resolution
), title.tracks.select_video(
None, title.tracks.select_hybrid(title.tracks.videos, [best_resolution])
) )
if match and match not in selected_videos: # If quality was specified, hybrid selection was already applied above
selected_videos.append(match) else:
title.tracks.videos = selected_videos selected_videos: list[Video] = []
for resolution, color_range in product(quality or [None], range_ or [None]):
match = next(
(
t
for t in title.tracks.videos
if (
not resolution
or t.height == resolution
or int(t.width * (9 / 16)) == resolution
)
and (not color_range or t.range == color_range)
),
None,
)
if match and match not in selected_videos:
selected_videos.append(match)
title.tracks.videos = selected_videos
# filter subtitle tracks # filter subtitle tracks
if s_lang and "all" not in s_lang: if s_lang and "all" not in s_lang:
@@ -606,7 +723,8 @@ class dl:
self.log.error(f"There's no {s_lang} Subtitle Track...") self.log.error(f"There's no {s_lang} Subtitle Track...")
sys.exit(1) sys.exit(1)
title.tracks.select_subtitles(lambda x: not x.forced or is_close_match(x.language, lang)) if not forced_subs:
title.tracks.select_subtitles(lambda x: not x.forced or is_close_match(x.language, lang))
# filter audio tracks # filter audio tracks
# might have no audio tracks if part of the video, e.g. transport stream hls # might have no audio tracks if part of the video, e.g. transport stream hls
@@ -633,8 +751,24 @@ class dl:
self.log.error(f"There's no {abitrate}kbps Audio Track...") self.log.error(f"There's no {abitrate}kbps Audio Track...")
sys.exit(1) sys.exit(1)
if lang: if lang:
if "best" in lang: processed_lang = []
# Get unique languages and select highest quality for each for language in lang:
if language == "orig":
if title.language:
orig_lang = (
str(title.language) if hasattr(title.language, "__str__") else title.language
)
if orig_lang not in processed_lang:
processed_lang.append(orig_lang)
else:
self.log.warning(
"Original language not available for title, skipping 'orig' selection"
)
else:
if language not in processed_lang:
processed_lang.append(language)
if "best" in processed_lang:
unique_languages = {track.language for track in title.tracks.audio} unique_languages = {track.language for track in title.tracks.audio}
selected_audio = [] selected_audio = []
for language in unique_languages: for language in unique_languages:
@@ -644,30 +778,36 @@ class dl:
) )
selected_audio.append(highest_quality) selected_audio.append(highest_quality)
title.tracks.audio = selected_audio title.tracks.audio = selected_audio
elif "all" not in lang: elif "all" not in processed_lang:
title.tracks.audio = title.tracks.by_language(title.tracks.audio, lang, per_language=1) per_language = 0 if len(processed_lang) > 1 else 1
title.tracks.audio = title.tracks.by_language(
title.tracks.audio, processed_lang, per_language=per_language
)
if not title.tracks.audio: if not title.tracks.audio:
self.log.error(f"There's no {lang} Audio Track, cannot continue...") self.log.error(f"There's no {processed_lang} Audio Track, cannot continue...")
sys.exit(1) sys.exit(1)
if video_only or audio_only or subs_only or chapters_only or no_subs or no_audio or no_chapters: if video_only or audio_only or subs_only or chapters_only or no_subs or no_audio or no_chapters:
# Determine which track types to keep based on the flags keep_videos = False
keep_videos = True keep_audio = False
keep_audio = True keep_subtitles = False
keep_subtitles = True keep_chapters = False
keep_chapters = True
# Handle exclusive flags (only keep one type) if video_only or audio_only or subs_only or chapters_only:
if video_only: if video_only:
keep_audio = keep_subtitles = keep_chapters = False keep_videos = True
elif audio_only: if audio_only:
keep_videos = keep_subtitles = keep_chapters = False keep_audio = True
elif subs_only: if subs_only:
keep_videos = keep_audio = keep_chapters = False keep_subtitles = True
elif chapters_only: if chapters_only:
keep_videos = keep_audio = keep_subtitles = False keep_chapters = True
else:
keep_videos = True
keep_audio = True
keep_subtitles = True
keep_chapters = True
# Handle exclusion flags (remove specific types)
if no_subs: if no_subs:
keep_subtitles = False keep_subtitles = False
if no_audio: if no_audio:
@@ -675,7 +815,6 @@ class dl:
if no_chapters: if no_chapters:
keep_chapters = False keep_chapters = False
# Build the kept_tracks list without duplicates
kept_tracks = [] kept_tracks = []
if keep_videos: if keep_videos:
kept_tracks.extend(title.tracks.videos) kept_tracks.extend(title.tracks.videos)
@@ -772,6 +911,7 @@ class dl:
while ( while (
not title.tracks.subtitles not title.tracks.subtitles
and not no_subs and not no_subs
and not (hasattr(service, "NO_SUBTITLES") and service.NO_SUBTITLES)
and not video_only and not video_only
and len(title.tracks.videos) > video_track_n and len(title.tracks.videos) > video_track_n
and any( and any(
@@ -845,6 +985,34 @@ class dl:
if font_count: if font_count:
self.log.info(f"Attached {font_count} fonts for the Subtitles") self.log.info(f"Attached {font_count} fonts for the Subtitles")
# Handle DRM decryption BEFORE repacking (must decrypt first!)
service_name = service.__class__.__name__.upper()
decryption_method = config.decryption_map.get(service_name, config.decryption)
use_mp4decrypt = decryption_method.lower() == "mp4decrypt"
if use_mp4decrypt:
decrypt_tool = "mp4decrypt"
else:
decrypt_tool = "Shaka Packager"
drm_tracks = [track for track in title.tracks if track.drm]
if drm_tracks:
with console.status(f"Decrypting tracks with {decrypt_tool}..."):
has_decrypted = False
for track in drm_tracks:
drm = track.get_drm_for_cdm(self.cdm)
if drm and hasattr(drm, "decrypt"):
drm.decrypt(track.path, use_mp4decrypt=use_mp4decrypt)
has_decrypted = True
events.emit(events.Types.TRACK_REPACKED, track=track)
else:
self.log.warning(
f"No matching DRM found for track {track} with CDM type {type(self.cdm).__name__}"
)
if has_decrypted:
self.log.info(f"Decrypted tracks with {decrypt_tool}")
# Now repack the decrypted tracks
with console.status("Repackaging tracks with FFMPEG..."): with console.status("Repackaging tracks with FFMPEG..."):
has_repacked = False has_repacked = False
for track in title.tracks: for track in title.tracks:
@@ -869,21 +1037,79 @@ class dl:
) )
multiplex_tasks: list[tuple[TaskID, Tracks]] = [] multiplex_tasks: list[tuple[TaskID, Tracks]] = []
for video_track in title.tracks.videos or [None]:
task_description = "Multiplexing"
if video_track:
if len(quality) > 1:
task_description += f" {video_track.height}p"
if len(range_) > 1:
task_description += f" {video_track.range.name}"
task_id = progress.add_task(f"{task_description}...", total=None, start=False) # Check if we're in hybrid mode
if any(r == Video.Range.HYBRID for r in range_) and title.tracks.videos:
# Hybrid mode: process DV and HDR10 tracks separately for each resolution
self.log.info("Processing Hybrid HDR10+DV tracks...")
task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments # Group video tracks by resolution
if video_track: resolutions_processed = set()
task_tracks.videos = [video_track] hdr10_tracks = [v for v in title.tracks.videos if v.range == Video.Range.HDR10]
dv_tracks = [v for v in title.tracks.videos if v.range == Video.Range.DV]
multiplex_tasks.append((task_id, task_tracks)) for hdr10_track in hdr10_tracks:
resolution = hdr10_track.height
if resolution in resolutions_processed:
continue
resolutions_processed.add(resolution)
# Find matching DV track for this resolution (use the lowest DV resolution)
matching_dv = min(dv_tracks, key=lambda v: v.height) if dv_tracks else None
if matching_dv:
# Create track pair for this resolution
resolution_tracks = [hdr10_track, matching_dv]
for track in resolution_tracks:
track.needs_duration_fix = True
# Run the hybrid processing for this resolution
Hybrid(resolution_tracks, self.service)
# Create unique output filename for this resolution
hybrid_filename = f"HDR10-DV-{resolution}p.hevc"
hybrid_output_path = config.directories.temp / hybrid_filename
# The Hybrid class creates HDR10-DV.hevc, rename it for this resolution
default_output = config.directories.temp / "HDR10-DV.hevc"
if default_output.exists():
shutil.move(str(default_output), str(hybrid_output_path))
# Create a mux task for this resolution
task_description = f"Multiplexing Hybrid HDR10+DV {resolution}p"
task_id = progress.add_task(f"{task_description}...", total=None, start=False)
# Create tracks with the hybrid video output for this resolution
task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments
# Create a new video track for the hybrid output
hybrid_track = deepcopy(hdr10_track)
hybrid_track.path = hybrid_output_path
hybrid_track.range = Video.Range.DV # It's now a DV track
hybrid_track.needs_duration_fix = True
task_tracks.videos = [hybrid_track]
multiplex_tasks.append((task_id, task_tracks))
console.print()
else:
# Normal mode: process each video track separately
for video_track in title.tracks.videos or [None]:
task_description = "Multiplexing"
if video_track:
if len(quality) > 1:
task_description += f" {video_track.height}p"
if len(range_) > 1:
task_description += f" {video_track.range.name}"
task_id = progress.add_task(f"{task_description}...", total=None, start=False)
task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments
if video_track:
task_tracks.videos = [video_track]
multiplex_tasks.append((task_id, task_tracks))
with Live(Padding(progress, (0, 5, 1, 5)), console=console): with Live(Padding(progress, (0, 5, 1, 5)), console=console):
for task_id, task_tracks in multiplex_tasks: for task_id, task_tracks in multiplex_tasks:

View File

@@ -10,11 +10,11 @@ from rich.padding import Padding
from rich.table import Table from rich.table import Table
from rich.tree import Tree from rich.tree import Tree
from unshackle.core import binaries
from unshackle.core.config import POSSIBLE_CONFIG_PATHS, config, config_path from unshackle.core.config import POSSIBLE_CONFIG_PATHS, config, config_path
from unshackle.core.console import console from unshackle.core.console import console
from unshackle.core.constants import context_settings from unshackle.core.constants import context_settings
from unshackle.core.services import Services from unshackle.core.services import Services
from unshackle.core.utils.osenvironment import get_os_arch
@click.group(short_help="Manage and configure the project environment.", context_settings=context_settings) @click.group(short_help="Manage and configure the project environment.", context_settings=context_settings)
@@ -25,45 +25,134 @@ def env() -> None:
@env.command() @env.command()
def check() -> None: def check() -> None:
"""Checks environment for the required dependencies.""" """Checks environment for the required dependencies."""
table = Table(title="Dependencies", expand=True) # Define all dependencies
table.add_column("Name", no_wrap=True) all_deps = [
table.add_column("Installed", justify="center") # Core Media Tools
table.add_column("Path", no_wrap=False, overflow="fold") {"name": "FFmpeg", "binary": binaries.FFMPEG, "required": True, "desc": "Media processing", "cat": "Core"},
{"name": "FFprobe", "binary": binaries.FFProbe, "required": True, "desc": "Media analysis", "cat": "Core"},
# builds shaka-packager based on os, arch {"name": "MKVToolNix", "binary": binaries.MKVToolNix, "required": True, "desc": "MKV muxing", "cat": "Core"},
packager_dep = get_os_arch("packager") {
"name": "mkvpropedit",
# Helper function to find binary with multiple possible names "binary": binaries.Mkvpropedit,
def find_binary(*names): "required": True,
for name in names: "desc": "MKV metadata",
if shutil.which(name): "cat": "Core",
return name },
return names[0] # Return first name as fallback for display {
"name": "shaka-packager",
dependencies = [ "binary": binaries.ShakaPackager,
{"name": "CCExtractor", "binary": "ccextractor"}, "required": True,
{"name": "FFMpeg", "binary": "ffmpeg"}, "desc": "DRM decryption",
{"name": "MKVToolNix", "binary": "mkvmerge"}, "cat": "DRM",
{"name": "Shaka-Packager", "binary": packager_dep}, },
{"name": "N_m3u8DL-RE", "binary": find_binary("N_m3u8DL-RE", "n-m3u8dl-re")}, {
{"name": "Aria2(c)", "binary": "aria2c"}, "name": "mp4decrypt",
"binary": binaries.Mp4decrypt,
"required": False,
"desc": "DRM decryption",
"cat": "DRM",
},
# HDR Processing
{"name": "dovi_tool", "binary": binaries.DoviTool, "required": False, "desc": "Dolby Vision", "cat": "HDR"},
{
"name": "HDR10Plus_tool",
"binary": binaries.HDR10PlusTool,
"required": False,
"desc": "HDR10+ metadata",
"cat": "HDR",
},
# Downloaders
{"name": "aria2c", "binary": binaries.Aria2, "required": False, "desc": "Multi-thread DL", "cat": "Download"},
{
"name": "N_m3u8DL-RE",
"binary": binaries.N_m3u8DL_RE,
"required": False,
"desc": "HLS/DASH/ISM",
"cat": "Download",
},
# Subtitle Tools
{
"name": "SubtitleEdit",
"binary": binaries.SubtitleEdit,
"required": False,
"desc": "Sub conversion",
"cat": "Subtitle",
},
{
"name": "CCExtractor",
"binary": binaries.CCExtractor,
"required": False,
"desc": "CC extraction",
"cat": "Subtitle",
},
# Media Players
{"name": "FFplay", "binary": binaries.FFPlay, "required": False, "desc": "Simple player", "cat": "Player"},
{"name": "MPV", "binary": binaries.MPV, "required": False, "desc": "Advanced player", "cat": "Player"},
# Network Tools
{
"name": "HolaProxy",
"binary": binaries.HolaProxy,
"required": False,
"desc": "Proxy service",
"cat": "Network",
},
{"name": "Caddy", "binary": binaries.Caddy, "required": False, "desc": "Web server", "cat": "Network"},
] ]
for dep in dependencies: # Track overall status
path = shutil.which(dep["binary"]) all_required_installed = True
total_installed = 0
total_required = 0
missing_required = []
# Create a single table
table = Table(
title="Environment Dependencies", title_style="bold", show_header=True, header_style="bold", expand=False
)
table.add_column("Category", style="bold cyan", width=10)
table.add_column("Tool", width=16)
table.add_column("Status", justify="center", width=10)
table.add_column("Req", justify="center", width=4)
table.add_column("Purpose", style="bright_black", width=20)
last_cat = None
for dep in all_deps:
path = dep["binary"]
# Category column (only show when it changes)
category = dep["cat"] if dep["cat"] != last_cat else ""
last_cat = dep["cat"]
# Status
if path: if path:
installed = "[green]:heavy_check_mark:[/green]" status = "[green][/green]"
path_output = path.lower() total_installed += 1
else: else:
installed = "[red]:x:[/red]" status = "[red][/red]"
path_output = "Not Found" if dep["required"]:
all_required_installed = False
missing_required.append(dep["name"])
# Add to the table if dep["required"]:
table.add_row(dep["name"], installed, path_output) total_required += 1
# Display the result # Required column (compact)
console.print(Padding(table, (1, 5))) req = "[red]Y[/red]" if dep["required"] else "[bright_black]-[/bright_black]"
# Add row
table.add_row(category, dep["name"], status, req, dep["desc"])
console.print(Padding(table, (1, 2)))
# Compact summary
summary_parts = [f"[bold]Total:[/bold] {total_installed}/{len(all_deps)}"]
if all_required_installed:
summary_parts.append("[green]All required tools installed ✓[/green]")
else:
summary_parts.append(f"[red]Missing required: {', '.join(missing_required)}[/red]")
console.print(Padding(" ".join(summary_parts), (1, 2)))
@env.command() @env.command()
@@ -79,7 +168,7 @@ def info() -> None:
tree.add(f"[repr.number]{i}.[/] [text2]{path.resolve()}[/]") tree.add(f"[repr.number]{i}.[/] [text2]{path.resolve()}[/]")
console.print(Padding(tree, (0, 5))) console.print(Padding(tree, (0, 5)))
table = Table(title="Directories", expand=True) table = Table(title="Directories", title_style="bold", expand=True)
table.add_column("Name", no_wrap=True) table.add_column("Name", no_wrap=True)
table.add_column("Path", no_wrap=False, overflow="fold") table.add_column("Path", no_wrap=False, overflow="fold")
@@ -92,12 +181,21 @@ def info() -> None:
for name in sorted(dir(config.directories)): for name in sorted(dir(config.directories)):
if name.startswith("__") or name == "app_dirs": if name.startswith("__") or name == "app_dirs":
continue continue
path = getattr(config.directories, name).resolve() attr_value = getattr(config.directories, name)
for var, var_path in path_vars.items():
if path.is_relative_to(var_path): # Handle both single Path objects and lists of Path objects
path = rf"%{var}%\{path.relative_to(var_path)}" if isinstance(attr_value, list):
break # For lists, show each path on a separate line
table.add_row(name.title(), str(path)) paths_str = "\n".join(str(path.resolve()) for path in attr_value)
table.add_row(name.title(), paths_str)
else:
# For single Path objects, use the original logic
path = attr_value.resolve()
for var, var_path in path_vars.items():
if path.is_relative_to(var_path):
path = rf"%{var}%\{path.relative_to(var_path)}"
break
table.add_row(name.title(), str(path))
console.print(Padding(table, (1, 5))) console.print(Padding(table, (1, 5)))

View File

@@ -46,7 +46,8 @@ def copy(to_vault: str, from_vaults: list[str], service: Optional[str] = None) -
vault_type = vault["type"] vault_type = vault["type"]
vault_args = vault.copy() vault_args = vault.copy()
del vault_args["type"] del vault_args["type"]
vaults.load(vault_type, **vault_args) if not vaults.load(vault_type, **vault_args):
raise click.ClickException(f"Failed to load vault ({vault_name}).")
to_vault: Vault = vaults.vaults[0] to_vault: Vault = vaults.vaults[0]
from_vaults: list[Vault] = vaults.vaults[1:] from_vaults: list[Vault] = vaults.vaults[1:]

View File

@@ -16,7 +16,7 @@ from unshackle.core import binaries
from unshackle.core.config import config from unshackle.core.config import config
from unshackle.core.console import console from unshackle.core.console import console
from unshackle.core.constants import context_settings from unshackle.core.constants import context_settings
from unshackle.core.proxies import Basic, Hola, NordVPN from unshackle.core.proxies import Basic, Hola, NordVPN, SurfsharkVPN
from unshackle.core.service import Service from unshackle.core.service import Service
from unshackle.core.services import Services from unshackle.core.services import Services
from unshackle.core.utils.click_types import ContextData from unshackle.core.utils.click_types import ContextData
@@ -69,6 +69,8 @@ def search(ctx: click.Context, no_proxy: bool, profile: Optional[str] = None, pr
proxy_providers.append(Basic(**config.proxy_providers["basic"])) proxy_providers.append(Basic(**config.proxy_providers["basic"]))
if config.proxy_providers.get("nordvpn"): if config.proxy_providers.get("nordvpn"):
proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"])) proxy_providers.append(NordVPN(**config.proxy_providers["nordvpn"]))
if config.proxy_providers.get("surfsharkvpn"):
proxy_providers.append(SurfsharkVPN(**config.proxy_providers["surfsharkvpn"]))
if binaries.HolaProxy: if binaries.HolaProxy:
proxy_providers.append(Hola()) proxy_providers.append(Hola())
for proxy_provider in proxy_providers: for proxy_provider in proxy_providers:

View File

@@ -1 +1 @@
__version__ = "1.0.1" __version__ = "1.4.0"

View File

@@ -1,6 +1,5 @@
import atexit import atexit
import logging import logging
from datetime import datetime
from pathlib import Path from pathlib import Path
import click import click
@@ -16,6 +15,7 @@ from unshackle.core.commands import Commands
from unshackle.core.config import config from unshackle.core.config import config
from unshackle.core.console import ComfyRichHandler, console from unshackle.core.console import ComfyRichHandler, console
from unshackle.core.constants import context_settings from unshackle.core.constants import context_settings
from unshackle.core.update_checker import UpdateChecker
from unshackle.core.utilities import rotate_log_file from unshackle.core.utilities import rotate_log_file
LOGGING_PATH = None LOGGING_PATH = None
@@ -69,7 +69,7 @@ def main(version: bool, debug: bool, log_path: Path) -> None:
r" ▀▀▀ ▀▀ █▪ ▀▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀ ·▀ ▀.▀▀▀ ▀▀▀ ", r" ▀▀▀ ▀▀ █▪ ▀▀▀▀ ▀▀▀ · ▀ ▀ ·▀▀▀ ·▀ ▀.▀▀▀ ▀▀▀ ",
style="ascii.art", style="ascii.art",
), ),
f"v[repr.number]{__version__}[/]", "v 3.3.3 Copyright © 2019-2025 rlaphoenix" + f"\nv [repr.number]{__version__}[/] - unshackle",
), ),
(1, 11, 1, 10), (1, 11, 1, 10),
expand=True, expand=True,
@@ -80,6 +80,22 @@ def main(version: bool, debug: bool, log_path: Path) -> None:
if version: if version:
return return
if config.update_checks:
try:
latest_version = UpdateChecker.check_for_updates_sync(__version__)
if latest_version:
console.print(
f"\n[yellow]⚠️ Update available![/yellow] "
f"Current: {__version__} → Latest: [green]{latest_version}[/green]",
justify="center",
)
console.print(
"Visit: https://github.com/unshackle-dl/unshackle/releases/latest\n",
justify="center",
)
except Exception:
pass
@atexit.register @atexit.register
def save_log(): def save_log():

View File

@@ -8,7 +8,24 @@ __shaka_platform = {"win32": "win", "darwin": "osx"}.get(sys.platform, sys.platf
def find(*names: str) -> Optional[Path]: def find(*names: str) -> Optional[Path]:
"""Find the path of the first found binary name.""" """Find the path of the first found binary name."""
# Get the directory containing this file to find the local binaries folder
current_dir = Path(__file__).parent.parent
local_binaries_dir = current_dir / "binaries"
for name in names: for name in names:
# First check local binaries folder
if local_binaries_dir.exists():
local_path = local_binaries_dir / name
if local_path.is_file() and local_path.stat().st_mode & 0o111: # Check if executable
return local_path
# Also check with .exe extension on Windows
if sys.platform == "win32":
local_path_exe = local_binaries_dir / f"{name}.exe"
if local_path_exe.is_file():
return local_path_exe
# Fall back to system PATH
path = shutil.which(name) path = shutil.which(name)
if path: if path:
return Path(path) return Path(path)
@@ -32,6 +49,11 @@ HolaProxy = find("hola-proxy")
MPV = find("mpv") MPV = find("mpv")
Caddy = find("caddy") Caddy = find("caddy")
N_m3u8DL_RE = find("N_m3u8DL-RE", "n-m3u8dl-re") N_m3u8DL_RE = find("N_m3u8DL-RE", "n-m3u8dl-re")
MKVToolNix = find("mkvmerge")
Mkvpropedit = find("mkvpropedit")
DoviTool = find("dovi_tool")
HDR10PlusTool = find("hdr10plus_tool", "HDR10Plus_tool")
Mp4decrypt = find("mp4decrypt")
__all__ = ( __all__ = (
@@ -46,5 +68,10 @@ __all__ = (
"MPV", "MPV",
"Caddy", "Caddy",
"N_m3u8DL_RE", "N_m3u8DL_RE",
"MKVToolNix",
"Mkvpropedit",
"DoviTool",
"HDR10PlusTool",
"Mp4decrypt",
"find", "find",
) )

View File

@@ -14,7 +14,7 @@ class Config:
core_dir = Path(__file__).resolve().parent core_dir = Path(__file__).resolve().parent
namespace_dir = core_dir.parent namespace_dir = core_dir.parent
commands = namespace_dir / "commands" commands = namespace_dir / "commands"
services = namespace_dir / "services" services = [namespace_dir / "services"]
vaults = namespace_dir / "vaults" vaults = namespace_dir / "vaults"
fonts = namespace_dir / "fonts" fonts = namespace_dir / "fonts"
user_configs = core_dir.parent user_configs = core_dir.parent
@@ -45,13 +45,17 @@ class Config:
self.curl_impersonate: dict = kwargs.get("curl_impersonate") or {} self.curl_impersonate: dict = kwargs.get("curl_impersonate") or {}
self.remote_cdm: list[dict] = kwargs.get("remote_cdm") or [] self.remote_cdm: list[dict] = kwargs.get("remote_cdm") or []
self.credentials: dict = kwargs.get("credentials") or {} self.credentials: dict = kwargs.get("credentials") or {}
self.subtitle: dict = kwargs.get("subtitle") or {}
self.directories = self._Directories() self.directories = self._Directories()
for name, path in (kwargs.get("directories") or {}).items(): for name, path in (kwargs.get("directories") or {}).items():
if name.lower() in ("app_dirs", "core_dir", "namespace_dir", "user_configs", "data"): if name.lower() in ("app_dirs", "core_dir", "namespace_dir", "user_configs", "data"):
# these must not be modified by the user # these must not be modified by the user
continue continue
setattr(self.directories, name, Path(path).expanduser()) if name == "services" and isinstance(path, list):
setattr(self.directories, name, [Path(p).expanduser() for p in path])
else:
setattr(self.directories, name, Path(path).expanduser())
downloader_cfg = kwargs.get("downloader") or "requests" downloader_cfg = kwargs.get("downloader") or "requests"
if isinstance(downloader_cfg, dict): if isinstance(downloader_cfg, dict):
@@ -68,13 +72,23 @@ class Config:
self.headers: dict = kwargs.get("headers") or {} self.headers: dict = kwargs.get("headers") or {}
self.key_vaults: list[dict[str, Any]] = kwargs.get("key_vaults", []) self.key_vaults: list[dict[str, Any]] = kwargs.get("key_vaults", [])
self.muxing: dict = kwargs.get("muxing") or {} self.muxing: dict = kwargs.get("muxing") or {}
self.nordvpn: dict = kwargs.get("nordvpn") or {}
self.proxy_providers: dict = kwargs.get("proxy_providers") or {} self.proxy_providers: dict = kwargs.get("proxy_providers") or {}
self.serve: dict = kwargs.get("serve") or {} self.serve: dict = kwargs.get("serve") or {}
self.services: dict = kwargs.get("services") or {} self.services: dict = kwargs.get("services") or {}
decryption_cfg = kwargs.get("decryption") or {}
if isinstance(decryption_cfg, dict):
self.decryption_map = {k.upper(): v for k, v in decryption_cfg.items()}
self.decryption = self.decryption_map.get("DEFAULT", "shaka")
else:
self.decryption_map = {}
self.decryption = decryption_cfg or "shaka"
self.set_terminal_bg: bool = kwargs.get("set_terminal_bg", False) self.set_terminal_bg: bool = kwargs.get("set_terminal_bg", False)
self.tag: str = kwargs.get("tag") or "" self.tag: str = kwargs.get("tag") or ""
self.tmdb_api_key: str = kwargs.get("tmdb_api_key") or "" self.tmdb_api_key: str = kwargs.get("tmdb_api_key") or ""
self.update_checks: bool = kwargs.get("update_checks", True)
self.update_check_interval: int = kwargs.get("update_check_interval", 24)
self.scene_naming: bool = kwargs.get("scene_naming", True)
@classmethod @classmethod
def from_yaml(cls, path: Path) -> Config: def from_yaml(cls, path: Path) -> Config:

View File

@@ -7,7 +7,7 @@ DOWNLOAD_LICENCE_ONLY = Event()
DRM_SORT_MAP = ["ClearKey", "Widevine"] DRM_SORT_MAP = ["ClearKey", "Widevine"]
LANGUAGE_MAX_DISTANCE = 5 # this is max to be considered "same", e.g., en, en-US, en-AU LANGUAGE_MAX_DISTANCE = 5 # this is max to be considered "same", e.g., en, en-US, en-AU
VIDEO_CODEC_MAP = {"AVC": "H.264", "HEVC": "H.265"} VIDEO_CODEC_MAP = {"AVC": "H.264", "HEVC": "H.265"}
DYNAMIC_RANGE_MAP = {"HDR10": "HDR", "HDR10+": "HDR", "Dolby Vision": "DV"} DYNAMIC_RANGE_MAP = {"HDR10": "HDR", "HDR10+": "HDR10P", "Dolby Vision": "DV", "HDR10 / HDR10+": "HDR10P", "HDR10 / HDR10": "HDR"}
AUDIO_CODEC_MAP = {"E-AC-3": "DDP", "AC-3": "DD"} AUDIO_CODEC_MAP = {"E-AC-3": "DDP", "AC-3": "DD"}
context_settings = dict( context_settings = dict(

View File

@@ -76,6 +76,11 @@ def download(url: str, save_path: Path, session: Session, **kwargs: Any) -> Gene
try: try:
content_length = int(stream.headers.get("Content-Length", "0")) content_length = int(stream.headers.get("Content-Length", "0"))
# Skip Content-Length validation for compressed responses since
# curl_impersonate automatically decompresses but Content-Length shows compressed size
if stream.headers.get("Content-Encoding", "").lower() in ["gzip", "deflate", "br"]:
content_length = 0
except ValueError: except ValueError:
content_length = 0 content_length = 0

View File

@@ -90,6 +90,11 @@ def download(
if not segmented: if not segmented:
try: try:
content_length = int(stream.headers.get("Content-Length", "0")) content_length = int(stream.headers.get("Content-Length", "0"))
# Skip Content-Length validation for compressed responses since
# requests automatically decompresses but Content-Length shows compressed size
if stream.headers.get("Content-Encoding", "").lower() in ["gzip", "deflate", "br"]:
content_length = 0
except ValueError: except ValueError:
content_length = 0 content_length = 0

View File

@@ -39,17 +39,23 @@ class PlayReady:
if not isinstance(pssh, PSSH): if not isinstance(pssh, PSSH):
raise TypeError(f"Expected pssh to be a {PSSH}, not {pssh!r}") raise TypeError(f"Expected pssh to be a {PSSH}, not {pssh!r}")
kids: list[UUID] = [] if pssh_b64:
for header in pssh.wrm_headers: kids = self._extract_kids_from_pssh_b64(pssh_b64)
try: else:
signed_ids, _, _, _ = header.read_attributes() kids = []
except Exception:
continue # Extract KIDs using pyplayready's method (may miss some KIDs)
for signed_id in signed_ids: if not kids:
for header in pssh.wrm_headers:
try: try:
kids.append(UUID(bytes_le=base64.b64decode(signed_id.value))) signed_ids, _, _, _ = header.read_attributes()
except Exception: except Exception:
continue continue
for signed_id in signed_ids:
try:
kids.append(UUID(bytes_le=base64.b64decode(signed_id.value)))
except Exception:
continue
if kid: if kid:
if isinstance(kid, str): if isinstance(kid, str):
@@ -72,6 +78,66 @@ class PlayReady:
if pssh_b64: if pssh_b64:
self.data.setdefault("pssh_b64", pssh_b64) self.data.setdefault("pssh_b64", pssh_b64)
def _extract_kids_from_pssh_b64(self, pssh_b64: str) -> list[UUID]:
"""Extract all KIDs from base64-encoded PSSH data."""
try:
import xml.etree.ElementTree as ET
# Decode the PSSH
pssh_bytes = base64.b64decode(pssh_b64)
# Try to find XML in the PSSH data
# PlayReady PSSH usually has XML embedded in it
pssh_str = pssh_bytes.decode("utf-16le", errors="ignore")
# Find WRMHEADER
xml_start = pssh_str.find("<WRMHEADER")
if xml_start == -1:
# Try UTF-8
pssh_str = pssh_bytes.decode("utf-8", errors="ignore")
xml_start = pssh_str.find("<WRMHEADER")
if xml_start != -1:
clean_xml = pssh_str[xml_start:]
xml_end = clean_xml.find("</WRMHEADER>") + len("</WRMHEADER>")
clean_xml = clean_xml[:xml_end]
root = ET.fromstring(clean_xml)
ns = {"pr": "http://schemas.microsoft.com/DRM/2007/03/PlayReadyHeader"}
kids = []
# Extract from CUSTOMATTRIBUTES/KIDS
kid_elements = root.findall(".//pr:CUSTOMATTRIBUTES/pr:KIDS/pr:KID", ns)
for kid_elem in kid_elements:
value = kid_elem.get("VALUE")
if value:
try:
kid_bytes = base64.b64decode(value + "==")
kid_uuid = UUID(bytes_le=kid_bytes)
kids.append(kid_uuid)
except Exception:
pass
# Also get individual KID
individual_kids = root.findall(".//pr:DATA/pr:KID", ns)
for kid_elem in individual_kids:
if kid_elem.text:
try:
kid_bytes = base64.b64decode(kid_elem.text.strip() + "==")
kid_uuid = UUID(bytes_le=kid_bytes)
if kid_uuid not in kids:
kids.append(kid_uuid)
except Exception:
pass
return kids
except Exception:
pass
return []
@classmethod @classmethod
def from_track(cls, track: AnyTrack, session: Optional[Session] = None) -> PlayReady: def from_track(cls, track: AnyTrack, session: Optional[Session] = None) -> PlayReady:
if not session: if not session:
@@ -187,14 +253,69 @@ class PlayReady:
if not self.content_keys: if not self.content_keys:
raise PlayReady.Exceptions.EmptyLicense("No Content Keys were within the License") raise PlayReady.Exceptions.EmptyLicense("No Content Keys were within the License")
def decrypt(self, path: Path) -> None: def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
"""
Decrypt a Track with PlayReady DRM.
Args:
path: Path to the encrypted file to decrypt
use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager
Raises:
EnvironmentError if the required decryption executable could not be found.
ValueError if the track has not yet been downloaded.
SubprocessError if the decryption process returned a non-zero exit code.
"""
if not self.content_keys: if not self.content_keys:
raise ValueError("Cannot decrypt a Track without any Content Keys...") raise ValueError("Cannot decrypt a Track without any Content Keys...")
if not binaries.ShakaPackager:
raise EnvironmentError("Shaka Packager executable not found but is required.")
if not path or not path.exists(): if not path or not path.exists():
raise ValueError("Tried to decrypt a file that does not exist.") raise ValueError("Tried to decrypt a file that does not exist.")
if use_mp4decrypt:
return self._decrypt_with_mp4decrypt(path)
else:
return self._decrypt_with_shaka_packager(path)
def _decrypt_with_mp4decrypt(self, path: Path) -> None:
"""Decrypt using mp4decrypt"""
if not binaries.Mp4decrypt:
raise EnvironmentError("mp4decrypt executable not found but is required.")
output_path = path.with_stem(f"{path.stem}_decrypted")
# Build key arguments
key_args = []
for kid, key in self.content_keys.items():
kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
key_hex = key if isinstance(key, str) else key.hex()
key_args.extend(["--key", f"{kid_hex}:{key_hex}"])
cmd = [
str(binaries.Mp4decrypt),
"--show-progress",
*key_args,
str(path),
str(output_path),
]
try:
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
except subprocess.CalledProcessError as e:
error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)
if not output_path.exists():
raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
if output_path.stat().st_size == 0:
raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")
path.unlink()
shutil.move(output_path, path)
def _decrypt_with_shaka_packager(self, path: Path) -> None:
"""Decrypt using Shaka Packager (original method)"""
if not binaries.ShakaPackager:
raise EnvironmentError("Shaka Packager executable not found but is required.")
output_path = path.with_stem(f"{path.stem}_decrypted") output_path = path.with_stem(f"{path.stem}_decrypted")
config.directories.temp.mkdir(parents=True, exist_ok=True) config.directories.temp.mkdir(parents=True, exist_ok=True)

View File

@@ -227,22 +227,69 @@ class Widevine:
finally: finally:
cdm.close(session_id) cdm.close(session_id)
def decrypt(self, path: Path) -> None: def decrypt(self, path: Path, use_mp4decrypt: bool = False) -> None:
""" """
Decrypt a Track with Widevine DRM. Decrypt a Track with Widevine DRM.
Args:
path: Path to the encrypted file to decrypt
use_mp4decrypt: If True, use mp4decrypt instead of Shaka Packager
Raises: Raises:
EnvironmentError if the Shaka Packager executable could not be found. EnvironmentError if the required decryption executable could not be found.
ValueError if the track has not yet been downloaded. ValueError if the track has not yet been downloaded.
SubprocessError if Shaka Packager returned a non-zero exit code. SubprocessError if the decryption process returned a non-zero exit code.
""" """
if not self.content_keys: if not self.content_keys:
raise ValueError("Cannot decrypt a Track without any Content Keys...") raise ValueError("Cannot decrypt a Track without any Content Keys...")
if not binaries.ShakaPackager:
raise EnvironmentError("Shaka Packager executable not found but is required.")
if not path or not path.exists(): if not path or not path.exists():
raise ValueError("Tried to decrypt a file that does not exist.") raise ValueError("Tried to decrypt a file that does not exist.")
if use_mp4decrypt:
return self._decrypt_with_mp4decrypt(path)
else:
return self._decrypt_with_shaka_packager(path)
def _decrypt_with_mp4decrypt(self, path: Path) -> None:
"""Decrypt using mp4decrypt"""
if not binaries.Mp4decrypt:
raise EnvironmentError("mp4decrypt executable not found but is required.")
output_path = path.with_stem(f"{path.stem}_decrypted")
# Build key arguments
key_args = []
for kid, key in self.content_keys.items():
kid_hex = kid.hex if hasattr(kid, "hex") else str(kid).replace("-", "")
key_hex = key if isinstance(key, str) else key.hex()
key_args.extend(["--key", f"{kid_hex}:{key_hex}"])
cmd = [
str(binaries.Mp4decrypt),
"--show-progress",
*key_args,
str(path),
str(output_path),
]
try:
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
except subprocess.CalledProcessError as e:
error_msg = e.stderr if e.stderr else f"mp4decrypt failed with exit code {e.returncode}"
raise subprocess.CalledProcessError(e.returncode, cmd, output=e.stdout, stderr=error_msg)
if not output_path.exists():
raise RuntimeError(f"mp4decrypt failed: output file {output_path} was not created")
if output_path.stat().st_size == 0:
raise RuntimeError(f"mp4decrypt failed: output file {output_path} is empty")
path.unlink()
shutil.move(output_path, path)
def _decrypt_with_shaka_packager(self, path: Path) -> None:
"""Decrypt using Shaka Packager (original method)"""
if not binaries.ShakaPackager:
raise EnvironmentError("Shaka Packager executable not found but is required.")
output_path = path.with_stem(f"{path.stem}_decrypted") output_path = path.with_stem(f"{path.stem}_decrypted")
config.directories.temp.mkdir(parents=True, exist_ok=True) config.directories.temp.mkdir(parents=True, exist_ok=True)

View File

@@ -1,5 +1,6 @@
from .basic import Basic from .basic import Basic
from .hola import Hola from .hola import Hola
from .nordvpn import NordVPN from .nordvpn import NordVPN
from .surfsharkvpn import SurfsharkVPN
__all__ = ("Basic", "Hola", "NordVPN") __all__ = ("Basic", "Hola", "NordVPN", "SurfsharkVPN")

View File

@@ -0,0 +1,124 @@
import json
import random
import re
from typing import Optional
import requests
from unshackle.core.proxies.proxy import Proxy
class SurfsharkVPN(Proxy):
def __init__(self, username: str, password: str, server_map: Optional[dict[str, int]] = None):
"""
Proxy Service using SurfsharkVPN Service Credentials.
A username and password must be provided. These are Service Credentials, not your Login Credentials.
The Service Credentials can be found here: https://my.surfshark.com/vpn/manual-setup/main/openvpn
"""
if not username:
raise ValueError("No Username was provided to the SurfsharkVPN Proxy Service.")
if not password:
raise ValueError("No Password was provided to the SurfsharkVPN Proxy Service.")
if not re.match(r"^[a-z0-9]{48}$", username + password, re.IGNORECASE) or "@" in username:
raise ValueError(
"The Username and Password must be SurfsharkVPN Service Credentials, not your Login Credentials. "
"The Service Credentials can be found here: https://my.surfshark.com/vpn/manual-setup/main/openvpn"
)
if server_map is not None and not isinstance(server_map, dict):
raise TypeError(f"Expected server_map to be a dict mapping a region to a server ID, not '{server_map!r}'.")
self.username = username
self.password = password
self.server_map = server_map or {}
self.countries = self.get_countries()
def __repr__(self) -> str:
countries = len(set(x.get("country") for x in self.countries if x.get("country")))
servers = sum(1 for x in self.countries if x.get("connectionName"))
return f"{countries} Countr{['ies', 'y'][countries == 1]} ({servers} Server{['s', ''][servers == 1]})"
def get_proxy(self, query: str) -> Optional[str]:
"""
Get an HTTP(SSL) proxy URI for a SurfsharkVPN server.
"""
query = query.lower()
if re.match(r"^[a-z]{2}\d+$", query):
# country and surfsharkvpn server id, e.g., au-per, be-anr, us-bos
hostname = f"{query}.prod.surfshark.com"
else:
if query.isdigit():
# country id
country = self.get_country(by_id=int(query))
elif re.match(r"^[a-z]+$", query):
# country code
country = self.get_country(by_code=query)
else:
raise ValueError(f"The query provided is unsupported and unrecognized: {query}")
if not country:
# SurfsharkVPN doesnt have servers in this region
return
server_mapping = self.server_map.get(country["countryCode"].lower())
if server_mapping:
# country was set to a specific server ID in config
hostname = f"{country['code'].lower()}{server_mapping}.prod.surfshark.com"
else:
# get the random server ID
random_server = self.get_random_server(country["countryCode"])
if not random_server:
raise ValueError(
f"The SurfsharkVPN Country {query} currently has no random servers. "
"Try again later. If the issue persists, double-check the query."
)
hostname = random_server
return f"https://{self.username}:{self.password}@{hostname}:443"
def get_country(self, by_id: Optional[int] = None, by_code: Optional[str] = None) -> Optional[dict]:
"""Search for a Country and it's metadata."""
if all(x is None for x in (by_id, by_code)):
raise ValueError("At least one search query must be made.")
for country in self.countries:
if all(
[
by_id is None or country["id"] == int(by_id),
by_code is None or country["countryCode"] == by_code.upper(),
]
):
return country
def get_random_server(self, country_id: str):
"""
Get the list of random Server for a Country.
Note: There may not always be more than one recommended server.
"""
country = [x["connectionName"] for x in self.countries if x["countryCode"].lower() == country_id.lower()]
try:
country = random.choice(country)
return country
except Exception:
raise ValueError("Could not get random countrycode from the countries list.")
@staticmethod
def get_countries() -> list[dict]:
"""Get a list of available Countries and their metadata."""
res = requests.get(
url="https://api.surfshark.com/v3/server/clusters/all",
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
"Content-Type": "application/json",
},
)
if not res.ok:
raise ValueError(f"Failed to get a list of SurfsharkVPN countries [{res.status_code}]")
try:
return res.json()
except json.JSONDecodeError:
raise ValueError("Could not decode list of SurfsharkVPN countries, not JSON data.")

View File

@@ -6,7 +6,14 @@ from unshackle.core.config import config
from unshackle.core.service import Service from unshackle.core.service import Service
from unshackle.core.utilities import import_module_by_path from unshackle.core.utilities import import_module_by_path
_SERVICES = sorted((path for path in config.directories.services.glob("*/__init__.py")), key=lambda x: x.parent.stem) _service_dirs = config.directories.services
if not isinstance(_service_dirs, list):
_service_dirs = [_service_dirs]
_SERVICES = sorted(
(path for service_dir in _service_dirs for path in service_dir.glob("*/__init__.py")),
key=lambda x: x.parent.stem,
)
_MODULES = {path.parent.stem: getattr(import_module_by_path(path), path.parent.stem) for path in _SERVICES} _MODULES = {path.parent.stem: getattr(import_module_by_path(path), path.parent.stem) for path in _SERVICES}

View File

@@ -107,67 +107,87 @@ class Episode(Title):
name=self.name or "", name=self.name or "",
).strip() ).strip()
# MULTi if config.scene_naming:
if unique_audio_languages > 1: # Resolution
name += " MULTi" if primary_video_track:
resolution = primary_video_track.height
aspect_ratio = [
int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
]
if len(aspect_ratio) == 1:
# e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
aspect_ratio.append(1)
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
# We want the resolution represented in a 4:3 or 16:9 canvas.
# If it's not 4:3 or 16:9, calculate as if it's inside a 16:9 canvas,
# otherwise the track's height value is fine.
# We are assuming this title is some weird aspect ratio so most
# likely a movie or HD source, so it's most likely widescreen so
# 16:9 canvas makes the most sense.
resolution = int(primary_video_track.width * (9 / 16))
name += f" {resolution}p"
# Resolution # Service
if primary_video_track: if show_service:
resolution = primary_video_track.height name += f" {self.service.__name__}"
aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
if len(aspect_ratio) == 1:
# e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
aspect_ratio.append(1)
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
# We want the resolution represented in a 4:3 or 16:9 canvas.
# If it's not 4:3 or 16:9, calculate as if it's inside a 16:9 canvas,
# otherwise the track's height value is fine.
# We are assuming this title is some weird aspect ratio so most
# likely a movie or HD source, so it's most likely widescreen so
# 16:9 canvas makes the most sense.
resolution = int(primary_video_track.width * (9 / 16))
name += f" {resolution}p"
# Service # 'WEB-DL'
if show_service: name += " WEB-DL"
name += f" {self.service.__name__}"
# 'WEB-DL' # DUAL
name += " WEB-DL" if unique_audio_languages == 2:
name += " DUAL"
# Audio Codec + Channels (+ feature) # MULTi
if primary_audio_track: if unique_audio_languages > 2:
codec = primary_audio_track.format name += " MULTi"
channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
if channel_layout:
channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
else:
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
channels = float(channel_count)
features = primary_audio_track.format_additionalfeatures or "" # Audio Codec + Channels (+ feature)
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}" if primary_audio_track:
if "JOC" in features or primary_audio_track.joc: codec = primary_audio_track.format
name += " Atmos" channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
if channel_layout:
channels = float(
sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
)
else:
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
channels = float(channel_count)
# Video (dynamic range + hfr +) Codec features = primary_audio_track.format_additionalfeatures or ""
if primary_video_track: name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
codec = primary_video_track.format if "JOC" in features or primary_audio_track.joc:
hdr_format = primary_video_track.hdr_format_commercial name += " Atmos"
trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
frame_rate = float(primary_video_track.frame_rate)
if hdr_format:
name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
elif trc and "HLG" in trc:
name += " HLG"
if frame_rate > 30:
name += " HFR"
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
if config.tag: # Video (dynamic range + hfr +) Codec
name += f"-{config.tag}" if primary_video_track:
codec = primary_video_track.format
hdr_format = primary_video_track.hdr_format_commercial
trc = (
primary_video_track.transfer_characteristics
or primary_video_track.transfer_characteristics_original
)
frame_rate = float(primary_video_track.frame_rate)
if hdr_format:
if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
name += " DV"
if DYNAMIC_RANGE_MAP.get(hdr_format) and DYNAMIC_RANGE_MAP.get(hdr_format) != "DV":
name += " HDR"
else:
name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
elif trc and "HLG" in trc:
name += " HLG"
if frame_rate > 30:
name += " HFR"
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
return sanitize_filename(name) if config.tag:
name += f"-{config.tag}"
return sanitize_filename(name)
else:
# Simple naming style without technical details - use spaces instead of dots
return sanitize_filename(name, " ")
class Series(SortedKeyList, ABC): class Series(SortedKeyList, ABC):
@@ -182,9 +202,10 @@ class Series(SortedKeyList, ABC):
def tree(self, verbose: bool = False) -> Tree: def tree(self, verbose: bool = False) -> Tree:
seasons = Counter(x.season for x in self) seasons = Counter(x.season for x in self)
num_seasons = len(seasons) num_seasons = len(seasons)
num_episodes = sum(seasons.values()) sum(seasons.values())
season_breakdown = ", ".join(f"S{season}({count})" for season, count in sorted(seasons.items()))
tree = Tree( tree = Tree(
f"{num_seasons} Season{['s', ''][num_seasons == 1]}, {num_episodes} Episode{['s', ''][num_episodes == 1]}", f"{num_seasons} seasons, {season_breakdown}",
guide_style="bright_black", guide_style="bright_black",
) )
if verbose: if verbose:

View File

@@ -58,67 +58,87 @@ class Movie(Title):
# Name (Year) # Name (Year)
name = str(self).replace("$", "S") # e.g., Arli$$ name = str(self).replace("$", "S") # e.g., Arli$$
# MULTi if config.scene_naming:
if unique_audio_languages > 1: # Resolution
name += " MULTi" if primary_video_track:
resolution = primary_video_track.height
aspect_ratio = [
int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")
]
if len(aspect_ratio) == 1:
# e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
aspect_ratio.append(1)
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
# We want the resolution represented in a 4:3 or 16:9 canvas.
# If it's not 4:3 or 16:9, calculate as if it's inside a 16:9 canvas,
# otherwise the track's height value is fine.
# We are assuming this title is some weird aspect ratio so most
# likely a movie or HD source, so it's most likely widescreen so
# 16:9 canvas makes the most sense.
resolution = int(primary_video_track.width * (9 / 16))
name += f" {resolution}p"
# Resolution # Service
if primary_video_track: if show_service:
resolution = primary_video_track.height name += f" {self.service.__name__}"
aspect_ratio = [int(float(plane)) for plane in primary_video_track.other_display_aspect_ratio[0].split(":")]
if len(aspect_ratio) == 1:
# e.g., aspect ratio of 2 (2.00:1) would end up as `(2.0,)`, add 1
aspect_ratio.append(1)
if aspect_ratio[0] / aspect_ratio[1] not in (16 / 9, 4 / 3):
# We want the resolution represented in a 4:3 or 16:9 canvas.
# If it's not 4:3 or 16:9, calculate as if it's inside a 16:9 canvas,
# otherwise the track's height value is fine.
# We are assuming this title is some weird aspect ratio so most
# likely a movie or HD source, so it's most likely widescreen so
# 16:9 canvas makes the most sense.
resolution = int(primary_video_track.width * (9 / 16))
name += f" {resolution}p"
# Service # 'WEB-DL'
if show_service: name += " WEB-DL"
name += f" {self.service.__name__}"
# 'WEB-DL' # DUAL
name += " WEB-DL" if unique_audio_languages == 2:
name += " DUAL"
# Audio Codec + Channels (+ feature) # MULTi
if primary_audio_track: if unique_audio_languages > 2:
codec = primary_audio_track.format name += " MULTi"
channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
if channel_layout:
channels = float(sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" ")))
else:
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
channels = float(channel_count)
features = primary_audio_track.format_additionalfeatures or "" # Audio Codec + Channels (+ feature)
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}" if primary_audio_track:
if "JOC" in features or primary_audio_track.joc: codec = primary_audio_track.format
name += " Atmos" channel_layout = primary_audio_track.channel_layout or primary_audio_track.channellayout_original
if channel_layout:
channels = float(
sum({"LFE": 0.1}.get(position.upper(), 1) for position in channel_layout.split(" "))
)
else:
channel_count = primary_audio_track.channel_s or primary_audio_track.channels or 0
channels = float(channel_count)
# Video (dynamic range + hfr +) Codec features = primary_audio_track.format_additionalfeatures or ""
if primary_video_track: name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
codec = primary_video_track.format if "JOC" in features or primary_audio_track.joc:
hdr_format = primary_video_track.hdr_format_commercial name += " Atmos"
trc = primary_video_track.transfer_characteristics or primary_video_track.transfer_characteristics_original
frame_rate = float(primary_video_track.frame_rate)
if hdr_format:
name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
elif trc and "HLG" in trc:
name += " HLG"
if frame_rate > 30:
name += " HFR"
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
if config.tag: # Video (dynamic range + hfr +) Codec
name += f"-{config.tag}" if primary_video_track:
codec = primary_video_track.format
hdr_format = primary_video_track.hdr_format_commercial
trc = (
primary_video_track.transfer_characteristics
or primary_video_track.transfer_characteristics_original
)
frame_rate = float(primary_video_track.frame_rate)
if hdr_format:
if (primary_video_track.hdr_format or "").startswith("Dolby Vision"):
name += " DV"
if DYNAMIC_RANGE_MAP.get(hdr_format) and DYNAMIC_RANGE_MAP.get(hdr_format) != "DV":
name += " HDR"
else:
name += f" {DYNAMIC_RANGE_MAP.get(hdr_format)} "
elif trc and "HLG" in trc:
name += " HLG"
if frame_rate > 30:
name += " HFR"
name += f" {VIDEO_CODEC_MAP.get(codec, codec)}"
return sanitize_filename(name) if config.tag:
name += f"-{config.tag}"
return sanitize_filename(name)
else:
# Simple naming style without technical details - use spaces instead of dots
return sanitize_filename(name, " ")
class Movies(SortedKeyList, ABC): class Movies(SortedKeyList, ABC):

View File

@@ -100,22 +100,26 @@ class Song(Title):
# NN. Song Name # NN. Song Name
name = str(self).split(" / ")[1] name = str(self).split(" / ")[1]
# Service if config.scene_naming:
if show_service: # Service
name += f" {self.service.__name__}" if show_service:
name += f" {self.service.__name__}"
# 'WEB-DL' # 'WEB-DL'
name += " WEB-DL" name += " WEB-DL"
# Audio Codec + Channels (+ feature) # Audio Codec + Channels (+ feature)
name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}" name += f" {AUDIO_CODEC_MAP.get(codec, codec)}{channels:.1f}"
if "JOC" in features or audio_track.joc: if "JOC" in features or audio_track.joc:
name += " Atmos" name += " Atmos"
if config.tag: if config.tag:
name += f"-{config.tag}" name += f"-{config.tag}"
return sanitize_filename(name, " ") return sanitize_filename(name, " ")
else:
# Simple naming style without technical details
return sanitize_filename(name, " ")
class Album(SortedKeyList, ABC): class Album(SortedKeyList, ABC):

View File

@@ -2,9 +2,10 @@ from .attachment import Attachment
from .audio import Audio from .audio import Audio
from .chapter import Chapter from .chapter import Chapter
from .chapters import Chapters from .chapters import Chapters
from .hybrid import Hybrid
from .subtitle import Subtitle from .subtitle import Subtitle
from .track import Track from .track import Track
from .tracks import Tracks from .tracks import Tracks
from .video import Video from .video import Video
__all__ = ("Audio", "Attachment", "Chapter", "Chapters", "Subtitle", "Track", "Tracks", "Video") __all__ = ("Audio", "Attachment", "Chapter", "Chapters", "Hybrid", "Subtitle", "Track", "Tracks", "Video")

View File

@@ -62,6 +62,7 @@ class Attachment:
session = session or requests.Session() session = session or requests.Session()
response = session.get(url, stream=True) response = session.get(url, stream=True)
response.raise_for_status() response.raise_for_status()
config.directories.temp.mkdir(parents=True, exist_ok=True)
download_path.parent.mkdir(parents=True, exist_ok=True) download_path.parent.mkdir(parents=True, exist_ok=True)
with open(download_path, "wb") as f: with open(download_path, "wb") as f:

View File

@@ -0,0 +1,327 @@
import json
import logging
import os
import subprocess
import sys
from pathlib import Path
from rich.padding import Padding
from rich.rule import Rule
from unshackle.core.binaries import DoviTool, HDR10PlusTool
from unshackle.core.config import config
from unshackle.core.console import console
class Hybrid:
    """
    Builds a hybrid HDR10(+)/Dolby Vision HEVC stream from downloaded video tracks.

    Takes the Dolby Vision and HDR10(+) streams out of the VideoTracks, then
    attempts to inject the Dolby Vision metadata (RPU) layer into the HDR10(+)
    stream. If no DV track is available but HDR10+ is present, the HDR10+
    dynamic metadata is converted to a Dolby Vision Profile 8 RPU instead.
    """

    def __init__(self, videos, source) -> None:
        self.log = logging.getLogger("hybrid")
        # Local import to avoid a circular dependency with unshackle.core.tracks
        from unshackle.core.tracks import Video

        self.videos = videos
        self.source = source
        self.rpu_file = "RPU.bin"
        self.hdr_type = "HDR10"
        self.hevc_file = f"{self.hdr_type}-DV.hevc"
        self.hdr10plus_to_dv = False
        self.hdr10plus_file = "HDR10Plus.json"

        # Get resolution info from the HDR10 (or HDR10+) track for display only
        hdr10_track = next((v for v in videos if v.range == Video.Range.HDR10), None)
        hdr10p_track = next((v for v in videos if v.range == Video.Range.HDR10P), None)
        track_for_res = hdr10_track or hdr10p_track
        self.resolution = f"{track_for_res.height}p" if track_for_res and track_for_res.height else "Unknown"

        console.print(Padding(Rule(f"[rule.text]HDR10+DV Hybrid ({self.resolution})"), (1, 2)))

        # Every track must already be on disk before we can demux anything
        for video in self.videos:
            if not video.path or not os.path.exists(video.path):
                raise ValueError(f"Video track {video.id} was not downloaded before injection.")

        # Work out which dynamic-range variants we actually have
        has_dv = any(video.range == Video.Range.DV for video in self.videos)
        has_hdr10 = any(video.range == Video.Range.HDR10 for video in self.videos)
        has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)

        if not has_hdr10:
            raise ValueError("No HDR10 track available for hybrid processing.")

        # If we have HDR10+ but no DV, we can convert HDR10+ metadata to DV
        if not has_dv and has_hdr10p:
            self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
            self.hdr10plus_to_dv = True
        elif not has_dv:
            raise ValueError("No DV track available and no HDR10+ to convert.")

        # Skip all work if a previous run already produced the injected stream
        if os.path.isfile(config.directories.temp / self.hevc_file):
            self.log.info("✓ Already Injected")
            return

        for video in videos:
            # Use the actual path from the video track
            save_path = video.path
            if not save_path or not os.path.exists(save_path):
                raise ValueError(f"Video track {video.id} was not downloaded or path not found: {save_path}")

            if video.range == Video.Range.HDR10:
                self.extract_stream(save_path, "HDR10")
            elif video.range == Video.Range.HDR10P:
                # HDR10+ rides on an HDR10 base layer, so extract it as HDR10
                self.extract_stream(save_path, "HDR10")
                self.hdr_type = "HDR10+"
            elif video.range == Video.Range.DV:
                self.extract_stream(save_path, "DV")

        if self.hdr10plus_to_dv:
            # Extract HDR10+ metadata and convert it to a Dolby Vision RPU
            hdr10p_video = next(v for v in videos if v.range == Video.Range.HDR10P)
            self.extract_hdr10plus(hdr10p_video)
            self.convert_hdr10plus_to_dv()
        else:
            # Regular DV extraction
            dv_video = next(v for v in videos if v.range == Video.Range.DV)
            self.extract_rpu(dv_video)
            if os.path.isfile(config.directories.temp / "RPU_UNT.bin"):
                # Untouched RPU was extracted; normalize its Level 6 values
                self.rpu_file = "RPU_UNT.bin"
                self.level_6()
                # Mode 3 conversion already done during extraction when not untouched
            elif os.path.isfile(config.directories.temp / "RPU.bin"):
                # RPU already extracted with mode 3; nothing further to do
                pass

        self.injecting()
        self.log.info("✓ Injection Completed")

        # BUGFIX: original test was `self.source == ("itunes" or "appletvplus")`,
        # which evaluates to `self.source == "itunes"` and never matched
        # "appletvplus". missing_ok added so absent intermediates never abort.
        if self.source in ("itunes", "appletvplus"):
            Path.unlink(config.directories.temp / "hdr10.mkv", missing_ok=True)
            Path.unlink(config.directories.temp / "dv.mkv", missing_ok=True)

        # Remove intermediate artifacts; the injected stream (self.hevc_file) is kept
        Path.unlink(config.directories.temp / "HDR10.hevc", missing_ok=True)
        Path.unlink(config.directories.temp / "DV.hevc", missing_ok=True)
        Path.unlink(config.directories.temp / f"{self.rpu_file}", missing_ok=True)

    def ffmpeg_simple(self, save_path, output):
        """
        Copy the video stream out of `save_path` into `output` (no re-encode).

        Returns the ffmpeg process return code (0 on success).
        """
        p = subprocess.run(
            [
                "ffmpeg",
                "-nostdin",
                "-i",
                str(save_path),
                "-c:v",
                "copy",
                str(output),
                "-y",  # overwrite output
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return p.returncode

    def extract_stream(self, save_path, type_):
        """Extract the raw HEVC stream of the given type ("HDR10" or "DV") to temp."""
        output = Path(config.directories.temp / f"{type_}.hevc")

        with console.status(f"Extracting {type_} stream...", spinner="dots"):
            returncode = self.ffmpeg_simple(save_path, output)

        if returncode:
            # Remove the partial output so a re-run starts clean
            output.unlink(missing_ok=True)
            self.log.error(f"x Failed extracting {type_} stream")
            sys.exit(1)

        self.log.info(f"Extracted {type_} stream")

    def extract_rpu(self, video, untouched=False):
        """
        Extract the Dolby Vision RPU from DV.hevc using dovi_tool.

        By default extracts with `-m 3` (convert to Profile 8). If dovi_tool
        reports MAX_PQ_LUMINANCE issues, retries with an untouched extraction
        (RPU_UNT.bin), which the caller then passes through level_6().
        """
        if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
            config.directories.temp / "RPU_UNT.bin"
        ):
            return

        with console.status(
            f"Extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream...", spinner="dots"
        ):
            extraction_args = [str(DoviTool)]
            if not untouched:
                extraction_args += ["-m", "3"]
            extraction_args += [
                "extract-rpu",
                config.directories.temp / "DV.hevc",
                "-o",
                config.directories.temp / f"{'RPU' if not untouched else 'RPU_UNT'}.bin",
            ]

            rpu_extraction = subprocess.run(
                extraction_args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        if rpu_extraction.returncode:
            # missing_ok: the output may not exist if dovi_tool failed early;
            # a FileNotFoundError here would mask the real extraction error
            Path.unlink(config.directories.temp / f"{'RPU' if not untouched else 'RPU_UNT'}.bin", missing_ok=True)
            if b"MAX_PQ_LUMINANCE" in rpu_extraction.stderr:
                self.extract_rpu(video, untouched=True)
            elif b"Invalid PPS index" in rpu_extraction.stderr:
                raise ValueError("Dolby Vision VideoTrack seems to be corrupt")
            else:
                raise ValueError(f"Failed extracting{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")

        self.log.info(f"Extracted{' untouched ' if untouched else ' '}RPU from Dolby Vision stream")

    def level_6(self):
        """Edit RPU Level 6 (MaxCLL/MaxFALL/mastering luminance) values via dovi_tool editor."""
        with open(config.directories.temp / "L6.json", "w+") as level6_file:
            level6 = {
                "cm_version": "V29",
                "length": 0,
                "level6": {
                    "max_display_mastering_luminance": 1000,
                    "min_display_mastering_luminance": 1,
                    "max_content_light_level": 0,
                    "max_frame_average_light_level": 0,
                },
            }
            json.dump(level6, level6_file, indent=3)

        if not os.path.isfile(config.directories.temp / "RPU_L6.bin"):
            with console.status("Editing RPU Level 6 values...", spinner="dots"):
                level6 = subprocess.run(
                    [
                        str(DoviTool),
                        "editor",
                        "-i",
                        config.directories.temp / self.rpu_file,
                        "-j",
                        config.directories.temp / "L6.json",
                        "-o",
                        config.directories.temp / "RPU_L6.bin",
                    ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )

            if level6.returncode:
                # missing_ok: dovi_tool may have failed before creating the file
                Path.unlink(config.directories.temp / "RPU_L6.bin", missing_ok=True)
                raise ValueError("Failed editing RPU Level 6 values")

            self.log.info("Edited RPU Level 6 values")

        # Always inject with the edited RPU once it exists
        self.rpu_file = "RPU_L6.bin"

    def injecting(self):
        """Inject the Dolby Vision RPU into the HDR10 stream, producing self.hevc_file."""
        if os.path.isfile(config.directories.temp / self.hevc_file):
            return

        with console.status(f"Injecting Dolby Vision metadata into {self.hdr_type} stream...", spinner="dots"):
            inject_cmd = [
                str(DoviTool),
                "inject-rpu",
                "-i",
                config.directories.temp / "HDR10.hevc",
                "--rpu-in",
                config.directories.temp / self.rpu_file,
            ]

            # If we converted from HDR10+, remove the (now redundant) HDR10+
            # metadata during injection since the DV RPU supersedes it
            if self.hdr10plus_to_dv:
                inject_cmd.append("--drop-hdr10plus")
                self.log.info(" - Removing HDR10+ metadata during injection")

            inject_cmd.extend(["-o", config.directories.temp / self.hevc_file])

            inject = subprocess.run(
                inject_cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        if inject.returncode:
            # missing_ok: output may not exist if injection failed early
            Path.unlink(config.directories.temp / self.hevc_file, missing_ok=True)
            raise ValueError("Failed injecting Dolby Vision metadata into HDR10 stream")

        self.log.info(f"Injected Dolby Vision metadata into {self.hdr_type} stream")

    def extract_hdr10plus(self, _video):
        """Extract HDR10+ dynamic metadata (JSON) from the HDR10 stream via HDR10Plus_tool."""
        if os.path.isfile(config.directories.temp / self.hdr10plus_file):
            return

        if not HDR10PlusTool:
            raise ValueError("HDR10Plus_tool not found. Please install it to use HDR10+ to DV conversion.")

        with console.status("Extracting HDR10+ metadata...", spinner="dots"):
            # HDR10Plus_tool needs the raw HEVC stream
            extraction = subprocess.run(
                [
                    str(HDR10PlusTool),
                    "extract",
                    str(config.directories.temp / "HDR10.hevc"),
                    "-o",
                    str(config.directories.temp / self.hdr10plus_file),
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        if extraction.returncode:
            raise ValueError("Failed extracting HDR10+ metadata")

        # Check if the extracted file has content
        if os.path.getsize(config.directories.temp / self.hdr10plus_file) == 0:
            raise ValueError("No HDR10+ metadata found in the stream")

        self.log.info("Extracted HDR10+ metadata")

    def convert_hdr10plus_to_dv(self):
        """Generate a Dolby Vision Profile 8 RPU from extracted HDR10+ metadata."""
        if os.path.isfile(config.directories.temp / "RPU.bin"):
            return

        with console.status("Converting HDR10+ metadata to Dolby Vision...", spinner="dots"):
            # First create the extra metadata JSON for dovi_tool
            extra_metadata = {
                "cm_version": "V29",
                "length": 0,  # dovi_tool will figure this out
                "level6": {
                    "max_display_mastering_luminance": 1000,
                    "min_display_mastering_luminance": 1,
                    "max_content_light_level": 0,
                    "max_frame_average_light_level": 0,
                },
            }

            with open(config.directories.temp / "extra.json", "w") as f:
                json.dump(extra_metadata, f, indent=2)

            # Generate DV RPU from HDR10+ metadata
            conversion = subprocess.run(
                [
                    str(DoviTool),
                    "generate",
                    "-j",
                    str(config.directories.temp / "extra.json"),
                    "--hdr10plus-json",
                    str(config.directories.temp / self.hdr10plus_file),
                    "-o",
                    str(config.directories.temp / "RPU.bin"),
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )

        if conversion.returncode:
            raise ValueError("Failed converting HDR10+ to Dolby Vision")

        self.log.info("Converted HDR10+ metadata to Dolby Vision")
        self.log.info("✓ HDR10+ successfully converted to Dolby Vision Profile 8")

        # Clean up temporary files
        Path.unlink(config.directories.temp / "extra.json")
        Path.unlink(config.directories.temp / self.hdr10plus_file)

View File

@@ -15,9 +15,11 @@ from construct import Container
from pycaption import Caption, CaptionList, CaptionNode, WebVTTReader from pycaption import Caption, CaptionList, CaptionNode, WebVTTReader
from pycaption.geometry import Layout from pycaption.geometry import Layout
from pymp4.parser import MP4 from pymp4.parser import MP4
from subby import CommonIssuesFixer, SAMIConverter, SDHStripper, WebVTTConverter
from subtitle_filter import Subtitles from subtitle_filter import Subtitles
from unshackle.core import binaries from unshackle.core import binaries
from unshackle.core.config import config
from unshackle.core.tracks.track import Track from unshackle.core.tracks.track import Track
from unshackle.core.utilities import try_ensure_utf8 from unshackle.core.utilities import try_ensure_utf8
from unshackle.core.utils.webvtt import merge_segmented_webvtt from unshackle.core.utils.webvtt import merge_segmented_webvtt
@@ -30,6 +32,7 @@ class Subtitle(Track):
SubStationAlphav4 = "ASS" # https://wikipedia.org/wiki/SubStation_Alpha#Advanced_SubStation_Alpha= SubStationAlphav4 = "ASS" # https://wikipedia.org/wiki/SubStation_Alpha#Advanced_SubStation_Alpha=
TimedTextMarkupLang = "TTML" # https://wikipedia.org/wiki/Timed_Text_Markup_Language TimedTextMarkupLang = "TTML" # https://wikipedia.org/wiki/Timed_Text_Markup_Language
WebVTT = "VTT" # https://wikipedia.org/wiki/WebVTT WebVTT = "VTT" # https://wikipedia.org/wiki/WebVTT
SAMI = "SMI" # https://wikipedia.org/wiki/SAMI
# MPEG-DASH box-encapsulated subtitle formats # MPEG-DASH box-encapsulated subtitle formats
fTTML = "STPP" # https://www.w3.org/TR/2018/REC-ttml-imsc1.0.1-20180424 fTTML = "STPP" # https://www.w3.org/TR/2018/REC-ttml-imsc1.0.1-20180424
fVTT = "WVTT" # https://www.w3.org/TR/webvtt1 fVTT = "WVTT" # https://www.w3.org/TR/webvtt1
@@ -51,6 +54,8 @@ class Subtitle(Track):
return Subtitle.Codec.TimedTextMarkupLang return Subtitle.Codec.TimedTextMarkupLang
elif mime == "vtt": elif mime == "vtt":
return Subtitle.Codec.WebVTT return Subtitle.Codec.WebVTT
elif mime in ("smi", "sami"):
return Subtitle.Codec.SAMI
elif mime == "stpp": elif mime == "stpp":
return Subtitle.Codec.fTTML return Subtitle.Codec.fTTML
elif mime == "wvtt": elif mime == "wvtt":
@@ -228,6 +233,7 @@ class Subtitle(Track):
try: try:
caption_set = pycaption.WebVTTReader().read(text) caption_set = pycaption.WebVTTReader().read(text)
Subtitle.merge_same_cues(caption_set) Subtitle.merge_same_cues(caption_set)
Subtitle.filter_unwanted_cues(caption_set)
subtitle_text = pycaption.WebVTTWriter().write(caption_set) subtitle_text = pycaption.WebVTTWriter().write(caption_set)
self.path.write_text(subtitle_text, encoding="utf8") self.path.write_text(subtitle_text, encoding="utf8")
except pycaption.exceptions.CaptionReadSyntaxError: except pycaption.exceptions.CaptionReadSyntaxError:
@@ -236,6 +242,7 @@ class Subtitle(Track):
try: try:
caption_set = pycaption.WebVTTReader().read(text) caption_set = pycaption.WebVTTReader().read(text)
Subtitle.merge_same_cues(caption_set) Subtitle.merge_same_cues(caption_set)
Subtitle.filter_unwanted_cues(caption_set)
subtitle_text = pycaption.WebVTTWriter().write(caption_set) subtitle_text = pycaption.WebVTTWriter().write(caption_set)
self.path.write_text(subtitle_text, encoding="utf8") self.path.write_text(subtitle_text, encoding="utf8")
except Exception: except Exception:
@@ -306,10 +313,160 @@ class Subtitle(Track):
return "\n".join(sanitized_lines) return "\n".join(sanitized_lines)
def convert_with_subby(self, codec: Subtitle.Codec) -> Path:
    """
    Convert subtitle using subby library for better format support and processing.

    This method leverages subby's advanced subtitle processing capabilities
    including better WebVTT handling, SDH stripping, and common issue fixing.

    On success: replaces self.path with the converted file, updates self.codec,
    fires OnConverted, and returns the new path. On any failure (or when subby
    cannot handle the source/target codec) it falls back to _convert_standard.
    """
    if not self.path or not self.path.exists():
        raise ValueError("You must download the subtitle track first.")
    if self.codec == codec:
        # Nothing to do; already in the target format
        return self.path
    output_path = self.path.with_suffix(f".{codec.value.lower()}")
    original_path = self.path
    try:
        # Convert to SRT using subby first (subby only reads WebVTT/SAMI here)
        srt_subtitles = None
        if self.codec == Subtitle.Codec.WebVTT:
            converter = WebVTTConverter()
            srt_subtitles = converter.from_file(str(self.path))
        elif self.codec == Subtitle.Codec.SAMI:
            converter = SAMIConverter()
            srt_subtitles = converter.from_file(str(self.path))
        if srt_subtitles is not None:
            # Apply common fixes
            fixer = CommonIssuesFixer()
            fixed_srt, _ = fixer.from_srt(srt_subtitles)
            # If target is SRT, we're done
            if codec == Subtitle.Codec.SubRip:
                output_path.write_text(str(fixed_srt), encoding="utf8")
            else:
                # Convert from SRT to target format using existing pycaption logic;
                # the fixed SRT is round-tripped through a temp file for parse()
                temp_srt_path = self.path.with_suffix(".temp.srt")
                temp_srt_path.write_text(str(fixed_srt), encoding="utf8")
                # Parse the SRT and convert to target format
                caption_set = self.parse(temp_srt_path.read_bytes(), Subtitle.Codec.SubRip)
                self.merge_same_cues(caption_set)
                writer = {
                    Subtitle.Codec.TimedTextMarkupLang: pycaption.DFXPWriter,
                    Subtitle.Codec.WebVTT: pycaption.WebVTTWriter,
                }.get(codec)
                if writer:
                    subtitle_text = writer().write(caption_set)
                    output_path.write_text(subtitle_text, encoding="utf8")
                else:
                    # Fall back to existing conversion method
                    # (no pycaption writer for this target format)
                    temp_srt_path.unlink()
                    return self._convert_standard(codec)
                temp_srt_path.unlink()
            # Conversion succeeded: drop the source file and re-point the track
            if original_path.exists() and original_path != output_path:
                original_path.unlink()
            self.path = output_path
            self.codec = codec
            if callable(self.OnConverted):
                self.OnConverted(codec)
            return output_path
        else:
            # Fall back to existing conversion method
            # (source codec not handled by subby)
            return self._convert_standard(codec)
    except Exception:
        # Fall back to existing conversion method on any error
        # (deliberate best-effort: subby issues must never break conversion)
        return self._convert_standard(codec)
def convert(self, codec: Subtitle.Codec) -> Path:
    """
    Convert this Subtitle to another Format.

    The conversion method is determined by the 'conversion_method' setting in config:
    - 'auto' (default): Uses subby for WebVTT/SAMI, standard for others
    - 'subby': Always uses subby with CommonIssuesFixer
    - 'subtitleedit': Uses SubtitleEdit when available, falls back to pycaption
    - 'pycaption': Uses only pycaption library

    Any unrecognized value falls back to the standard conversion path.
    """
    # Check configuration for conversion method
    conversion_method = config.subtitle.get("conversion_method", "auto")

    if conversion_method == "subby":
        return self.convert_with_subby(codec)
    elif conversion_method == "subtitleedit":
        return self._convert_standard(codec)  # SubtitleEdit is used in standard conversion
    elif conversion_method == "pycaption":
        return self._convert_pycaption_only(codec)
    elif conversion_method == "auto":
        # Use subby for formats it handles better
        if self.codec in (Subtitle.Codec.WebVTT, Subtitle.Codec.SAMI):
            return self.convert_with_subby(codec)
        else:
            return self._convert_standard(codec)
    else:
        return self._convert_standard(codec)
def _convert_pycaption_only(self, codec: Subtitle.Codec) -> Path:
    """
    Convert subtitle using only pycaption library (no SubtitleEdit, no subby).

    This is the original conversion method that only uses pycaption. Replaces
    self.path with the converted file, updates self.codec, fires OnConverted,
    and returns the new path. Raises NotImplementedError when pycaption has no
    writer for the target format.
    """
    if not self.path or not self.path.exists():
        raise ValueError("You must download the subtitle track first.")
    if self.codec == codec:
        return self.path

    source_path = self.path
    target_path = source_path.with_suffix(f".{codec.value.lower()}")

    # pycaption can only write these three formats
    writer_cls = {
        Subtitle.Codec.SubRip: pycaption.SRTWriter,
        Subtitle.Codec.TimedTextMarkupLang: pycaption.DFXPWriter,
        Subtitle.Codec.WebVTT: pycaption.WebVTTWriter,
    }.get(codec)
    if writer_cls is None:
        raise NotImplementedError(f"Cannot convert {self.codec.name} to {codec.name} using pycaption only.")

    caption_set = self.parse(source_path.read_bytes(), self.codec)
    Subtitle.merge_same_cues(caption_set)
    if codec == Subtitle.Codec.WebVTT:
        # WebVTT output additionally drops blank/&nbsp;-only cues
        Subtitle.filter_unwanted_cues(caption_set)

    target_path.write_text(writer_cls().write(caption_set), encoding="utf8")

    # Swap the track over to the converted file and discard the source
    if source_path.exists() and source_path != target_path:
        source_path.unlink()
    self.path = target_path
    self.codec = codec
    if callable(self.OnConverted):
        self.OnConverted(codec)
    return target_path
def _convert_standard(self, codec: Subtitle.Codec) -> Path:
"""
Convert this Subtitle to another Format.
The file path location of the Subtitle data will be kept at the same The file path location of the Subtitle data will be kept at the same
location but the file extension will be changed appropriately. location but the file extension will be changed appropriately.
@@ -318,6 +475,7 @@ class Subtitle(Track):
- TimedTextMarkupLang - SubtitleEdit or pycaption.DFXPWriter - TimedTextMarkupLang - SubtitleEdit or pycaption.DFXPWriter
- WebVTT - SubtitleEdit or pycaption.WebVTTWriter - WebVTT - SubtitleEdit or pycaption.WebVTTWriter
- SubStationAlphav4 - SubtitleEdit - SubStationAlphav4 - SubtitleEdit
- SAMI - subby.SAMIConverter (when available)
- fTTML* - custom code using some pycaption functions - fTTML* - custom code using some pycaption functions
- fVTT* - custom code using some pycaption functions - fVTT* - custom code using some pycaption functions
*: Can read from format, but cannot convert to format *: Can read from format, but cannot convert to format
@@ -366,6 +524,8 @@ class Subtitle(Track):
caption_set = self.parse(self.path.read_bytes(), self.codec) caption_set = self.parse(self.path.read_bytes(), self.codec)
Subtitle.merge_same_cues(caption_set) Subtitle.merge_same_cues(caption_set)
if codec == Subtitle.Codec.WebVTT:
Subtitle.filter_unwanted_cues(caption_set)
subtitle_text = writer().write(caption_set) subtitle_text = writer().write(caption_set)
output_path.write_text(subtitle_text, encoding="utf8") output_path.write_text(subtitle_text, encoding="utf8")
@@ -416,6 +576,13 @@ class Subtitle(Track):
text = Subtitle.sanitize_broken_webvtt(text) text = Subtitle.sanitize_broken_webvtt(text)
text = Subtitle.space_webvtt_headers(text) text = Subtitle.space_webvtt_headers(text)
caption_set = pycaption.WebVTTReader().read(text) caption_set = pycaption.WebVTTReader().read(text)
elif codec == Subtitle.Codec.SAMI:
# Use subby for SAMI parsing
converter = SAMIConverter()
srt_subtitles = converter.from_bytes(data)
# Convert SRT back to CaptionSet for compatibility
srt_text = str(srt_subtitles).encode("utf8")
caption_set = Subtitle.parse(srt_text, Subtitle.Codec.SubRip)
else: else:
raise ValueError(f'Unknown Subtitle format "{codec}"...') raise ValueError(f'Unknown Subtitle format "{codec}"...')
except pycaption.exceptions.CaptionReadSyntaxError as e: except pycaption.exceptions.CaptionReadSyntaxError as e:
@@ -520,6 +687,24 @@ class Subtitle(Track):
if merged_captions: if merged_captions:
caption_set.set_captions(lang, merged_captions) caption_set.set_captions(lang, merged_captions)
@staticmethod
def filter_unwanted_cues(caption_set: pycaption.CaptionSet):
    """
    Filter out subtitle cues containing only &nbsp; or whitespace.

    Mutates caption_set in place, rebuilding each language's caption list
    with the effectively-empty cues removed.
    """
    def _is_blank(cue) -> bool:
        # A cue is "blank" if, after stripping, nothing remains, or it is a
        # lone &nbsp;, or it consists solely of whitespace/non-breaking spaces
        content = cue.get_text().strip()
        if not content or content == "&nbsp;":
            return True
        normalized = content.replace("&nbsp;", "\xa0")
        return all(ch in " \t\n\r\xa0" for ch in normalized)

    for language in caption_set.get_languages():
        kept = pycaption.CaptionList()
        for cue in caption_set.get_captions(language):
            if not _is_blank(cue):
                kept.append(cue)
        caption_set.set_captions(language, kept)
@staticmethod @staticmethod
def merge_segmented_wvtt(data: bytes, period_start: float = 0.0) -> tuple[CaptionList, Optional[str]]: def merge_segmented_wvtt(data: bytes, period_start: float = 0.0) -> tuple[CaptionList, Optional[str]]:
""" """
@@ -660,11 +845,45 @@ class Subtitle(Track):
def strip_hearing_impaired(self) -> None: def strip_hearing_impaired(self) -> None:
""" """
Strip captions for hearing impaired (SDH). Strip captions for hearing impaired (SDH).
It uses SubtitleEdit if available, otherwise filter-subs.
The SDH stripping method is determined by the 'sdh_method' setting in config:
- 'auto' (default): Tries subby first, then SubtitleEdit, then filter-subs
- 'subby': Uses subby's SDHStripper
- 'subtitleedit': Uses SubtitleEdit when available
- 'filter-subs': Uses subtitle-filter library
""" """
if not self.path or not self.path.exists(): if not self.path or not self.path.exists():
raise ValueError("You must download the subtitle track first.") raise ValueError("You must download the subtitle track first.")
# Check configuration for SDH stripping method
sdh_method = config.subtitle.get("sdh_method", "auto")
if sdh_method == "subby" and self.codec == Subtitle.Codec.SubRip:
# Use subby's SDHStripper directly on the file
stripper = SDHStripper()
stripped_srt, _ = stripper.from_file(str(self.path))
self.path.write_text(str(stripped_srt), encoding="utf8")
return
elif sdh_method == "subtitleedit" and binaries.SubtitleEdit:
# Force use of SubtitleEdit
pass # Continue to SubtitleEdit section below
elif sdh_method == "filter-subs":
# Force use of filter-subs
sub = Subtitles(self.path)
sub.filter(rm_fonts=True, rm_ast=True, rm_music=True, rm_effects=True, rm_names=True, rm_author=True)
sub.save()
return
elif sdh_method == "auto":
# Try subby first for SRT files, then fall back
if self.codec == Subtitle.Codec.SubRip:
try:
stripper = SDHStripper()
stripped_srt, _ = stripper.from_file(str(self.path))
self.path.write_text(str(stripped_srt), encoding="utf8")
return
except Exception:
pass # Fall through to other methods
if binaries.SubtitleEdit: if binaries.SubtitleEdit:
if self.codec == Subtitle.Codec.SubStationAlphav4: if self.codec == Subtitle.Codec.SubStationAlphav4:
output_format = "AdvancedSubStationAlpha" output_format = "AdvancedSubStationAlpha"

View File

@@ -11,6 +11,7 @@ from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn, TimeRe
from rich.table import Table from rich.table import Table
from rich.tree import Tree from rich.tree import Tree
from unshackle.core import binaries
from unshackle.core.config import config from unshackle.core.config import config
from unshackle.core.console import console from unshackle.core.console import console
from unshackle.core.constants import LANGUAGE_MAX_DISTANCE, AnyTrack, TrackT from unshackle.core.constants import LANGUAGE_MAX_DISTANCE, AnyTrack, TrackT
@@ -253,6 +254,31 @@ class Tracks:
def select_subtitles(self, x: Callable[[Subtitle], bool]) -> None: def select_subtitles(self, x: Callable[[Subtitle], bool]) -> None:
self.subtitles = list(filter(x, self.subtitles)) self.subtitles = list(filter(x, self.subtitles))
def select_hybrid(self, tracks, quality):
    """
    Build a predicate selecting tracks for hybrid HDR10+DV processing.

    Picks the best (highest-bitrate) HDR10 track for each requested
    resolution — matching either the track height or its 16:9-canvas
    height — plus the smallest-height DV track, whose RPU will be
    injected. Returns a callable usable with track filtering.
    """
    def _canvas_height(track) -> int:
        # Height the track would have on a 16:9 canvas (for odd aspect ratios)
        return int(track.width * 9 / 16)

    eligible_hdr10 = [
        t
        for t in tracks
        if t.range == Video.Range.HDR10 and (t.height in quality or _canvas_height(t) in quality)
    ]

    chosen_hdr10 = []
    for target in quality:
        matching = [t for t in eligible_hdr10 if target in (t.height, _canvas_height(t))]
        if matching:
            # assumes .bitrate exists
            chosen_hdr10.append(max(matching, key=lambda t: t.bitrate))

    dolby_vision = [t for t in tracks if t.range == Video.Range.DV]
    smallest_dv = min(dolby_vision, key=lambda t: t.height) if dolby_vision else None

    def select(track):
        if track in chosen_hdr10:
            return True
        if smallest_dv and track is smallest_dv:
            return True
        return False

    return select
def by_resolutions(self, resolutions: list[int], per_resolution: int = 0) -> None: def by_resolutions(self, resolutions: list[int], per_resolution: int = 0) -> None:
# Note: Do not merge these list comprehensions. They must be done separately so the results # Note: Do not merge these list comprehensions. They must be done separately so the results
# from the 16:9 canvas check is only used if there's no exact height resolution match. # from the 16:9 canvas check is only used if there's no exact height resolution match.
@@ -290,8 +316,11 @@ class Tracks:
progress: Update a rich progress bar via `completed=...`. This must be the progress: Update a rich progress bar via `completed=...`. This must be the
progress object's update() func, pre-set with task id via functools.partial. progress object's update() func, pre-set with task id via functools.partial.
""" """
if not binaries.MKVToolNix:
raise RuntimeError("MKVToolNix (mkvmerge) is required for muxing but was not found")
cl = [ cl = [
"mkvmerge", str(binaries.MKVToolNix),
"--no-date", # remove dates from the output for security "--no-date", # remove dates from the output for security
] ]
@@ -302,21 +331,39 @@ class Tracks:
if not vt.path or not vt.path.exists(): if not vt.path or not vt.path.exists():
raise ValueError("Video Track must be downloaded before muxing...") raise ValueError("Video Track must be downloaded before muxing...")
events.emit(events.Types.TRACK_MULTIPLEX, track=vt) events.emit(events.Types.TRACK_MULTIPLEX, track=vt)
cl.extend(
[ # Prepare base arguments
"--language", video_args = [
f"0:{vt.language}", "--language",
"--default-track", f"0:{vt.language}",
f"0:{i == 0}", "--default-track",
"--original-flag", f"0:{i == 0}",
f"0:{vt.is_original_lang}", "--original-flag",
"--compression", f"0:{vt.is_original_lang}",
"0:none", # disable extra compression "--compression",
"(", "0:none", # disable extra compression
str(vt.path), ]
")",
] # Add FPS fix if needed (typically for hybrid mode to prevent sync issues)
) if hasattr(vt, "needs_duration_fix") and vt.needs_duration_fix and vt.fps:
video_args.extend(
[
"--default-duration",
f"0:{vt.fps}fps" if isinstance(vt.fps, str) else f"0:{vt.fps:.3f}fps",
"--fix-bitstream-timing-information",
"0:1",
]
)
if hasattr(vt, "range") and vt.range == Video.Range.HLG:
video_args.extend(
[
"--color-transfer-characteristics",
"0:18", # ARIB STD-B67 (HLG)
]
)
cl.extend(video_args + ["(", str(vt.path), ")"])
for i, at in enumerate(self.audio): for i, at in enumerate(self.audio):
if not at.path or not at.path.exists(): if not at.path or not at.path.exists():

View File

@@ -94,6 +94,7 @@ class Video(Track):
HDR10 = "HDR10" # https://en.wikipedia.org/wiki/HDR10 HDR10 = "HDR10" # https://en.wikipedia.org/wiki/HDR10
HDR10P = "HDR10+" # https://en.wikipedia.org/wiki/HDR10%2B HDR10P = "HDR10+" # https://en.wikipedia.org/wiki/HDR10%2B
DV = "DV" # https://en.wikipedia.org/wiki/Dolby_Vision DV = "DV" # https://en.wikipedia.org/wiki/Dolby_Vision
HYBRID = "HYBRID" # Selects both HDR10 and DV tracks for hybrid processing with DoviTool
@staticmethod @staticmethod
def from_cicp(primaries: int, transfer: int, matrix: int) -> Video.Range: def from_cicp(primaries: int, transfer: int, matrix: int) -> Video.Range:
@@ -115,6 +116,7 @@ class Video(Track):
class Transfer(Enum): class Transfer(Enum):
Unspecified = 0 Unspecified = 0
BT_709 = 1 BT_709 = 1
Unspecified_Image = 2
BT_601 = 6 BT_601 = 6
BT_2020 = 14 BT_2020 = 14
BT_2100 = 15 BT_2100 = 15
@@ -236,6 +238,8 @@ class Video(Track):
except Exception as e: except Exception as e:
raise ValueError("Expected fps to be a number, float, or a string as numerator/denominator form, " + str(e)) raise ValueError("Expected fps to be a number, float, or a string as numerator/denominator form, " + str(e))
self.needs_duration_fix = False
def __str__(self) -> str: def __str__(self) -> str:
return " | ".join( return " | ".join(
filter( filter(

View File

@@ -0,0 +1,276 @@
from __future__ import annotations
import asyncio
import json
import time
from pathlib import Path
from typing import Optional
import requests
class UpdateChecker:
    """
    Check for available updates from the GitHub repository.

    This class provides functionality to check for newer versions of the application
    by querying the GitHub releases API. It includes rate limiting, caching, and
    both synchronous and asynchronous interfaces.

    Attributes:
        REPO_URL: GitHub API URL for latest release
        TIMEOUT: Request timeout in seconds
        DEFAULT_CHECK_INTERVAL: Default time between checks in seconds (24 hours)
    """

    REPO_URL = "https://api.github.com/repos/unshackle-dl/unshackle/releases/latest"
    TIMEOUT = 5
    DEFAULT_CHECK_INTERVAL = 24 * 60 * 60

    @classmethod
    def _get_cache_file(cls) -> Path:
        """Get the path to the update check cache file."""
        # Imported lazily so importing this module does not pull in config early.
        from unshackle.core.config import config

        return config.directories.cache / "update_check.json"

    @classmethod
    def _load_cache_data(cls) -> dict:
        """
        Load cache data from file.

        Returns:
            Cache data dictionary or empty dict if loading fails
        """
        cache_file = cls._get_cache_file()
        if not cache_file.exists():
            return {}
        try:
            with open(cache_file, "r") as f:
                return json.load(f)
        except (json.JSONDecodeError, OSError):
            # Corrupt or unreadable cache is treated the same as no cache.
            return {}

    @staticmethod
    def _parse_version(version_string: str) -> str:
        """
        Parse and normalize version string by removing 'v' prefix.

        Args:
            version_string: Raw version string from API

        Returns:
            Cleaned version string
        """
        return version_string.lstrip("v")

    @staticmethod
    def _is_valid_version(version: str) -> bool:
        """
        Validate version string format.

        Args:
            version: Version string to validate

        Returns:
            True if version string is valid semantic version, False otherwise
        """
        if not version or not isinstance(version, str):
            return False
        try:
            parts = version.split(".")
            # Require at least "major.minor" with every part an integer.
            if len(parts) < 2:
                return False
            for part in parts:
                int(part)
            return True
        except (ValueError, AttributeError):
            return False

    @classmethod
    def _fetch_latest_version(cls) -> Optional[str]:
        """
        Fetch the latest version from GitHub API.

        Returns:
            Latest version string if successful, None otherwise
        """
        try:
            response = requests.get(cls.REPO_URL, timeout=cls.TIMEOUT)
            if response.status_code != 200:
                return None
            data = response.json()
            latest_version = cls._parse_version(data.get("tag_name", ""))
            return latest_version if cls._is_valid_version(latest_version) else None
        except Exception:
            # Network/JSON failures are non-fatal; update checks are best-effort.
            return None

    @classmethod
    def _should_check_for_updates(cls, check_interval: int = DEFAULT_CHECK_INTERVAL) -> bool:
        """
        Check if enough time has passed since the last update check.

        Args:
            check_interval: Time in seconds between checks (default: 24 hours)

        Returns:
            True if we should check for updates, False otherwise
        """
        cache_data = cls._load_cache_data()
        if not cache_data:
            return True
        last_check = cache_data.get("last_check", 0)
        current_time = time.time()
        return (current_time - last_check) >= check_interval

    @classmethod
    def _update_cache(cls, latest_version: Optional[str] = None, current_version: Optional[str] = None) -> None:
        """
        Update the cache file with the current timestamp and version info.

        Args:
            latest_version: The latest version found, if any
            current_version: The current version being used
        """
        cache_file = cls._get_cache_file()
        try:
            cache_file.parent.mkdir(parents=True, exist_ok=True)
            cache_data = {
                "last_check": time.time(),
                "latest_version": latest_version,
                "current_version": current_version,
            }
            with open(cache_file, "w") as f:
                json.dump(cache_data, f, indent=2)
        except (OSError, TypeError, ValueError):
            # BUGFIX: the previous except tuple referenced json.JSONEncodeError,
            # which does not exist in the json module -- handling any write error
            # would itself raise AttributeError. OSError covers filesystem
            # failures; TypeError/ValueError cover json.dump serialization errors.
            pass

    @staticmethod
    def _compare_versions(current: str, latest: str) -> bool:
        """
        Simple semantic version comparison.

        Args:
            current: Current version string (e.g., "1.1.0")
            latest: Latest version string (e.g., "1.2.0")

        Returns:
            True if latest > current, False otherwise
        """
        if not UpdateChecker._is_valid_version(current) or not UpdateChecker._is_valid_version(latest):
            return False
        try:
            current_parts = [int(x) for x in current.split(".")]
            latest_parts = [int(x) for x in latest.split(".")]
            # Pad the shorter version with zeros so "1.2" compares as "1.2.0".
            max_length = max(len(current_parts), len(latest_parts))
            current_parts.extend([0] * (max_length - len(current_parts)))
            latest_parts.extend([0] * (max_length - len(latest_parts)))
            for current_part, latest_part in zip(current_parts, latest_parts):
                if latest_part > current_part:
                    return True
                elif latest_part < current_part:
                    return False
            return False
        except (ValueError, AttributeError):
            return False

    @classmethod
    async def check_for_updates(cls, current_version: str) -> Optional[str]:
        """
        Check if there's a newer version available on GitHub.

        Args:
            current_version: The current version string (e.g., "1.1.0")

        Returns:
            The latest version string if an update is available, None otherwise
        """
        if not cls._is_valid_version(current_version):
            return None
        try:
            # get_running_loop() is the modern form; get_event_loop() is
            # deprecated for this use and we are guaranteed to be in a loop here.
            loop = asyncio.get_running_loop()
            # Run the blocking requests call off the event loop.
            latest_version = await loop.run_in_executor(None, cls._fetch_latest_version)
            if latest_version and cls._compare_versions(current_version, latest_version):
                return latest_version
        except Exception:
            pass
        return None

    @classmethod
    def _get_cached_update_info(cls, current_version: str) -> Optional[str]:
        """
        Check if there's a cached update available for the current version.

        Args:
            current_version: The current version string

        Returns:
            The latest version string if an update is available from cache, None otherwise
        """
        cache_data = cls._load_cache_data()
        if not cache_data:
            return None
        cached_current = cache_data.get("current_version")
        cached_latest = cache_data.get("latest_version")
        # The cache is only trusted if it was written for this same version.
        if cached_current == current_version and cached_latest:
            if cls._compare_versions(current_version, cached_latest):
                return cached_latest
        return None

    @classmethod
    def check_for_updates_sync(cls, current_version: str, check_interval: Optional[int] = None) -> Optional[str]:
        """
        Synchronous version of update check with rate limiting.

        Args:
            current_version: The current version string (e.g., "1.1.0")
            check_interval: Time in seconds between checks (default: from config)

        Returns:
            The latest version string if an update is available, None otherwise
        """
        if not cls._is_valid_version(current_version):
            return None
        if check_interval is None:
            from unshackle.core.config import config

            # Config stores the interval in hours; convert to seconds.
            check_interval = config.update_check_interval * 60 * 60
        if not cls._should_check_for_updates(check_interval):
            # Rate-limited: answer from the cached result instead of the network.
            return cls._get_cached_update_info(current_version)
        latest_version = cls._fetch_latest_version()
        cls._update_cache(latest_version, current_version)
        if latest_version and cls._compare_versions(current_version, latest_version):
            return latest_version
        return None

View File

@@ -3,7 +3,6 @@ from __future__ import annotations
import logging import logging
import os import os
import re import re
import shutil
import subprocess import subprocess
import tempfile import tempfile
from difflib import SequenceMatcher from difflib import SequenceMatcher
@@ -12,6 +11,7 @@ from typing import Optional, Tuple
import requests import requests
from unshackle.core import binaries
from unshackle.core.config import config from unshackle.core.config import config
from unshackle.core.titles.episode import Episode from unshackle.core.titles.episode import Episode
from unshackle.core.titles.movie import Movie from unshackle.core.titles.movie import Movie
@@ -175,8 +175,7 @@ def external_ids(tmdb_id: int, kind: str) -> dict:
def _apply_tags(path: Path, tags: dict[str, str]) -> None: def _apply_tags(path: Path, tags: dict[str, str]) -> None:
if not tags: if not tags:
return return
mkvpropedit = shutil.which("mkvpropedit") if not binaries.Mkvpropedit:
if not mkvpropedit:
log.debug("mkvpropedit not found on PATH; skipping tags") log.debug("mkvpropedit not found on PATH; skipping tags")
return return
log.debug("Applying tags to %s: %s", path, tags) log.debug("Applying tags to %s: %s", path, tags)
@@ -189,7 +188,7 @@ def _apply_tags(path: Path, tags: dict[str, str]) -> None:
tmp_path = Path(f.name) tmp_path = Path(f.name)
try: try:
subprocess.run( subprocess.run(
[mkvpropedit, str(path), "--tags", f"global:{tmp_path}"], [str(binaries.Mkvpropedit), str(path), "--tags", f"global:{tmp_path}"],
check=False, check=False,
stdout=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, stderr=subprocess.DEVNULL,

View File

@@ -25,8 +25,20 @@ class Vaults:
def __len__(self) -> int: def __len__(self) -> int:
return len(self.vaults) return len(self.vaults)
def load(self, type_: str, **kwargs: Any) -> None: def load(self, type_: str, **kwargs: Any) -> bool:
"""Load a Vault into the vaults list.""" """Load a Vault into the vaults list. Returns True if successful, False otherwise."""
module = _MODULES.get(type_)
if not module:
raise ValueError(f"Unable to find vault command by the name '{type_}'.")
try:
vault = module(**kwargs)
self.vaults.append(vault)
return True
except Exception:
return False
def load_critical(self, type_: str, **kwargs: Any) -> None:
"""Load a critical Vault that must succeed or raise an exception."""
module = _MODULES.get(type_) module = _MODULES.get(type_)
if not module: if not module:
raise ValueError(f"Unable to find vault command by the name '{type_}'.") raise ValueError(f"Unable to find vault command by the name '{type_}'.")

View File

@@ -16,7 +16,7 @@ from unshackle.core.manifests import DASH
from unshackle.core.search_result import SearchResult from unshackle.core.search_result import SearchResult
from unshackle.core.service import Service from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Subtitle, Tracks from unshackle.core.tracks import Chapter, Subtitle, Tracks, Video
class EXAMPLE(Service): class EXAMPLE(Service):
@@ -33,6 +33,7 @@ class EXAMPLE(Service):
TITLE_RE = r"^(?:https?://?domain\.com/details/)?(?P<title_id>[^/]+)" TITLE_RE = r"^(?:https?://?domain\.com/details/)?(?P<title_id>[^/]+)"
GEOFENCE = ("US", "UK") GEOFENCE = ("US", "UK")
NO_SUBTITLES = True
@staticmethod @staticmethod
@click.command(name="EXAMPLE", short_help="https://domain.com") @click.command(name="EXAMPLE", short_help="https://domain.com")
@@ -49,6 +50,11 @@ class EXAMPLE(Service):
self.title = title self.title = title
self.movie = movie self.movie = movie
self.device = device self.device = device
self.cdm = ctx.obj.cdm
# Get range parameter for HDR support
range_param = ctx.parent.params.get("range_")
self.range = range_param[0].name if range_param else "SDR"
if self.config is None: if self.config is None:
raise Exception("Config is missing!") raise Exception("Config is missing!")
@@ -160,15 +166,54 @@ class EXAMPLE(Service):
return Series(episodes) return Series(episodes)
def get_tracks(self, title: Title_T) -> Tracks: def get_tracks(self, title: Title_T) -> Tracks:
# Handle HYBRID mode by fetching both HDR10 and DV tracks separately
if self.range == "HYBRID" and self.cdm.security_level != 3:
tracks = Tracks()
# Get HDR10 tracks
hdr10_tracks = self._get_tracks_for_range(title, "HDR10")
tracks.add(hdr10_tracks, warn_only=True)
# Get DV tracks
dv_tracks = self._get_tracks_for_range(title, "DV")
tracks.add(dv_tracks, warn_only=True)
return tracks
else:
# Normal single-range behavior
return self._get_tracks_for_range(title, self.range)
def _get_tracks_for_range(self, title: Title_T, range_override: str = None) -> Tracks:
# Use range_override if provided, otherwise use self.range
current_range = range_override if range_override else self.range
# Build API request parameters
params = {
"token": self.token,
"guid": title.id,
}
data = {
"type": self.config["client"][self.device]["type"],
}
# Add range-specific parameters
if current_range == "HDR10":
data["video_format"] = "hdr10"
elif current_range == "DV":
data["video_format"] = "dolby_vision"
else:
data["video_format"] = "sdr"
# Only request high-quality HDR content with L1 CDM
if current_range in ("HDR10", "DV") and self.cdm.security_level == 3:
# L3 CDM - skip HDR content
return Tracks()
streams = self.session.post( streams = self.session.post(
url=self.config["endpoints"]["streams"], url=self.config["endpoints"]["streams"],
params={ params=params,
"token": self.token, data=data,
"guid": title.id,
},
data={
"type": self.config["client"][self.device]["type"],
},
).json()["media"] ).json()["media"]
self.license = { self.license = {
@@ -182,6 +227,15 @@ class EXAMPLE(Service):
self.log.debug(f"Manifest URL: {manifest_url}") self.log.debug(f"Manifest URL: {manifest_url}")
tracks = DASH.from_url(url=manifest_url, session=self.session).to_tracks(language=title.language) tracks = DASH.from_url(url=manifest_url, session=self.session).to_tracks(language=title.language)
# Set range attributes on video tracks
for video in tracks.videos:
if current_range == "HDR10":
video.range = Video.Range.HDR10
elif current_range == "DV":
video.range = Video.Range.DV
else:
video.range = Video.Range.SDR
# Remove DRM-free ("clear") audio tracks # Remove DRM-free ("clear") audio tracks
tracks.audio = [ tracks.audio = [
track for track in tracks.audio if "clear" not in track.data["dash"]["representation"].get("id") track for track in tracks.audio if "clear" not in track.data["dash"]["representation"].get("id")

View File

@@ -4,14 +4,40 @@ tag: user_tag
# Set terminal background color (custom option not in CONFIG.md) # Set terminal background color (custom option not in CONFIG.md)
set_terminal_bg: false set_terminal_bg: false
# Set file naming convention
# true for style - Prime.Suspect.S07E01.The.Final.Act.Part.One.1080p.ITV.WEB-DL.AAC2.0.H.264
# false for style - Prime Suspect S07E01 The Final Act - Part One
scene_naming: true
# Check for updates from GitHub repository on startup (default: true)
update_checks: true
# How often to check for updates, in hours (default: 24)
update_check_interval: 24
# Muxing configuration # Muxing configuration
muxing: muxing:
set_title: false set_title: false
# Login credentials for each Service # Login credentials for each Service
credentials: credentials:
# Direct credentials (no profile support)
EXAMPLE: email@example.com:password EXAMPLE: email@example.com:password
EXAMPLE2: username:password
# Per-profile credentials with default fallback
SERVICE_NAME:
default: default@email.com:password # Used when no -p/--profile is specified
profile1: user1@email.com:password1
profile2: user2@email.com:password2
# Per-profile credentials without default (requires -p/--profile)
SERVICE_NAME2:
john: john@example.com:johnspassword
jane: jane@example.com:janespassword
# You can also use list format for passwords with special characters
SERVICE_NAME3:
default: ["user@email.com", ":PasswordWith:Colons"]
# Override default directories used across unshackle # Override default directories used across unshackle
directories: directories:
@@ -25,14 +51,25 @@ directories:
prds: PRDs prds: PRDs
# Additional directories that can be configured: # Additional directories that can be configured:
# commands: Commands # commands: Commands
# services: Services services:
- /path/to/services
- /other/path/to/services
# vaults: Vaults # vaults: Vaults
# fonts: Fonts # fonts: Fonts
# Pre-define which Widevine or PlayReady device to use for each Service # Pre-define which Widevine or PlayReady device to use for each Service
cdm: cdm:
# Global default CDM device (fallback for all services/profiles)
default: WVD_1 default: WVD_1
EXAMPLE: PRD_1
# Direct service-specific CDM
DIFFERENT_EXAMPLE: PRD_1
# Per-profile CDM configuration
EXAMPLE:
john_sd: chromecdm_903_l3 # Profile 'john_sd' uses Chrome CDM L3
jane_uhd: nexus_5_l1 # Profile 'jane_uhd' uses Nexus 5 L1
default: generic_android_l3 # Default CDM for this service
# Use pywidevine Serve-compliant Remote CDMs # Use pywidevine Serve-compliant Remote CDMs
remote_cdm: remote_cdm:
@@ -127,6 +164,15 @@ filenames:
# API key for The Movie Database (TMDB) # API key for The Movie Database (TMDB)
tmdb_api_key: "" tmdb_api_key: ""
# conversion_method:
# - auto (default): Smart routing - subby for WebVTT/SAMI, standard for others
# - subby: Always use subby with advanced processing
# - pycaption: Use only pycaption library (no SubtitleEdit, no subby)
# - subtitleedit: Prefer SubtitleEdit when available, fall back to pycaption
subtitle:
conversion_method: auto
sdh_method: auto
# Configuration for pywidevine's serve functionality # Configuration for pywidevine's serve functionality
serve: serve:
users: users:
@@ -140,23 +186,48 @@ serve:
# Configuration data for each Service # Configuration data for each Service
services: services:
# Service-specific configuration goes here # Service-specific configuration goes here
# EXAMPLE: # Profile-specific configurations can be nested under service names
# api_key: "service_specific_key"
# Legacy NordVPN configuration (use proxy_providers instead) # Example: with profile-specific device configs
nordvpn: EXAMPLE:
username: "" # Global service config
password: "" api_key: "service_api_key"
servers:
- us: 12 # Profile-specific device configurations
profiles:
john_sd:
device:
app_name: "AIV"
device_model: "SHIELD Android TV"
jane_uhd:
device:
app_name: "AIV"
device_model: "Fire TV Stick 4K"
# Example: Service with different regions per profile
SERVICE_NAME:
profiles:
us_account:
region: "US"
api_endpoint: "https://api.us.service.com"
uk_account:
region: "GB"
api_endpoint: "https://api.uk.service.com"
# External proxy provider services # External proxy provider services
proxy_providers: proxy_providers:
nordvpn: nordvpn:
username: username_from_service_credentials username: username_from_service_credentials
password: password_from_service_credentials password: password_from_service_credentials
servers: server_map:
- us: 12 # force US server #12 for US proxies - us: 12 # force US server #12 for US proxies
surfsharkvpn:
username: your_surfshark_service_username # Service credentials from https://my.surfshark.com/vpn/manual-setup/main/openvpn
password: your_surfshark_service_password # Service credentials (not your login password)
server_map:
- us: 3844 # force US server #3844 for US proxies
- gb: 2697 # force GB server #2697 for GB proxies
- au: 4621 # force AU server #4621 for AU proxies
basic: basic:
GB: GB:
- "socks5://username:password@bhx.socks.ipvanish.com:1080" # 1 (Birmingham) - "socks5://username:password@bhx.socks.ipvanish.com:1080" # 1 (Birmingham)

View File

@@ -30,7 +30,7 @@ class HTTP(Vault):
api_mode: "query" for query parameters or "json" for JSON API api_mode: "query" for query parameters or "json" for JSON API
""" """
super().__init__(name) super().__init__(name)
self.url = host.rstrip("/") self.url = host
self.password = password self.password = password
self.username = username self.username = username
self.api_mode = api_mode.lower() self.api_mode = api_mode.lower()
@@ -88,21 +88,23 @@ class HTTP(Vault):
if self.api_mode == "json": if self.api_mode == "json":
try: try:
title = getattr(self, "current_title", None) params = {
response = self.request( "kid": kid,
"GetKey", "service": service.lower(),
{ }
"kid": kid,
"service": service.lower(), response = self.request("GetKey", params)
"title": title,
},
)
if response.get("status") == "not_found": if response.get("status") == "not_found":
return None return None
keys = response.get("keys", []) keys = response.get("keys", [])
for key_entry in keys: for key_entry in keys:
if key_entry["kid"] == kid: if isinstance(key_entry, str) and ":" in key_entry:
return key_entry["key"] entry_kid, entry_key = key_entry.split(":", 1)
if entry_kid == kid:
return entry_key
elif isinstance(key_entry, dict):
if key_entry.get("kid") == kid:
return key_entry.get("key")
except Exception as e: except Exception as e:
print(f"Failed to get key ({e.__class__.__name__}: {e})") print(f"Failed to get key ({e.__class__.__name__}: {e})")
return None return None

30
uv.lock generated
View File

@@ -1391,6 +1391,26 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" }, { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" },
] ]
[[package]]
name = "srt"
version = "3.5.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/66/b7/4a1bc231e0681ebf339337b0cd05b91dc6a0d701fa852bb812e244b7a030/srt-3.5.3.tar.gz", hash = "sha256:4884315043a4f0740fd1f878ed6caa376ac06d70e135f306a6dc44632eed0cc0", size = 28296, upload-time = "2023-03-28T02:35:44.007Z" }
[[package]]
name = "subby"
version = "0.3.21"
source = { git = "https://github.com/vevv/subby.git#390cb2f4a55e98057cdd65314d8cbffd5d0a11f1" }
dependencies = [
{ name = "beautifulsoup4" },
{ name = "click" },
{ name = "langcodes" },
{ name = "lxml" },
{ name = "pymp4" },
{ name = "srt" },
{ name = "tinycss" },
]
[[package]] [[package]]
name = "subtitle-filter" name = "subtitle-filter"
version = "1.5.0" version = "1.5.0"
@@ -1400,6 +1420,12 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/10/40/c5d138e1f302b25240678943422a646feea52bab1f594c669c101c5e5070/subtitle_filter-1.5.0-py3-none-any.whl", hash = "sha256:6b506315be64870fba2e6894a70d76389407ce58c325fdf05129e0530f0a0f5b", size = 8346, upload-time = "2024-08-01T22:42:47.787Z" }, { url = "https://files.pythonhosted.org/packages/10/40/c5d138e1f302b25240678943422a646feea52bab1f594c669c101c5e5070/subtitle_filter-1.5.0-py3-none-any.whl", hash = "sha256:6b506315be64870fba2e6894a70d76389407ce58c325fdf05129e0530f0a0f5b", size = 8346, upload-time = "2024-08-01T22:42:47.787Z" },
] ]
[[package]]
name = "tinycss"
version = "0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/05/59/af583fff6236c7d2f94f8175c40ce501dcefb8d1b42e4bb7a2622dff689e/tinycss-0.4.tar.gz", hash = "sha256:12306fb50e5e9e7eaeef84b802ed877488ba80e35c672867f548c0924a76716e", size = 87759, upload-time = "2016-09-23T16:30:14.894Z" }
[[package]] [[package]]
name = "tomli" name = "tomli"
version = "2.2.1" version = "2.2.1"
@@ -1479,7 +1505,7 @@ wheels = [
[[package]] [[package]]
name = "unshackle" name = "unshackle"
version = "1.0.1" version = "1.4.0"
source = { editable = "." } source = { editable = "." }
dependencies = [ dependencies = [
{ name = "appdirs" }, { name = "appdirs" },
@@ -1510,6 +1536,7 @@ dependencies = [
{ name = "rlaphoenix-m3u8" }, { name = "rlaphoenix-m3u8" },
{ name = "ruamel-yaml" }, { name = "ruamel-yaml" },
{ name = "sortedcontainers" }, { name = "sortedcontainers" },
{ name = "subby" },
{ name = "subtitle-filter" }, { name = "subtitle-filter" },
{ name = "unidecode" }, { name = "unidecode" },
{ name = "urllib3" }, { name = "urllib3" },
@@ -1558,6 +1585,7 @@ requires-dist = [
{ name = "rlaphoenix-m3u8", specifier = ">=3.4.0,<4" }, { name = "rlaphoenix-m3u8", specifier = ">=3.4.0,<4" },
{ name = "ruamel-yaml", specifier = ">=0.18.6,<0.19" }, { name = "ruamel-yaml", specifier = ">=0.18.6,<0.19" },
{ name = "sortedcontainers", specifier = ">=2.4.0,<3" }, { name = "sortedcontainers", specifier = ">=2.4.0,<3" },
{ name = "subby", git = "https://github.com/vevv/subby.git" },
{ name = "subtitle-filter", specifier = ">=1.4.9,<2" }, { name = "subtitle-filter", specifier = ">=1.4.9,<2" },
{ name = "unidecode", specifier = ">=1.3.8,<2" }, { name = "unidecode", specifier = ">=1.3.8,<2" },
{ name = "urllib3", specifier = ">=2.2.1,<3" }, { name = "urllib3", specifier = ">=2.2.1,<3" },