12 Commits
1.1.0 ... 1.2.0

Author SHA1 Message Date
Andy  22c9aa195e  feat: Bump version to 1.2.0 and update changelog, I'll eventually learn symantic versioning.  2025-07-30 23:15:20 +00:00
Andy  776d8f3df0  feat: Update version to 1.1.1 and add update checking functionality  2025-07-30 23:12:13 +00:00
Andy  67caf71295  Merge branch 'hdr10ptest'  2025-07-30 22:49:01 +00:00
Andy  3ed76d199c  chore(workflow): 🗑️ Remove Docker build and publish workflow, its too messy at the moment doing manual builds for now.  2025-07-30 22:48:00 +00:00
Andy  4de9251f95  feat(tracks): Add duration fix handling for video and hybrid tracks  2025-07-30 21:39:34 +00:00
Andy  d2fb409ad9  feat(hybrid): Add HDR10+ support for conversion to Dolby Vision and enhance metadata extraction  2025-07-30 21:14:50 +00:00
Andy  fdff3a1c56  refactor(env): Enhance dependency check with detailed categorization and status summary  2025-07-30 20:12:43 +00:00
Andy  5d1f2eb458  feat(attachment): Ensure temporary directory is created for downloads  2025-07-30 18:52:36 +00:00
Andy  3efac3d474  feat(vaults): Enhance vault loading with success status  2025-07-30 17:29:06 +00:00
Andy  f578904b76  feat(subtitle): Add information into unshackle.yaml on how to use new Subby subtitle conversion.  2025-07-30 02:18:35 +00:00
Andy  9f20159605  feat(hybrid): Display resolution of HDR10 track in hybrid mode console output and clean up unused code  2025-07-30 02:08:07 +00:00
Andy  4decb0d107  feat(dl): Enhance hybrid processing to handle HDR10 and DV tracks separately by resolution, Hotfix for -q 2160,1080 both tracks will have Hybrid correctly now.  2025-07-30 01:09:59 +00:00
18 changed files with 554 additions and 373 deletions

View File

@@ -1,99 +0,0 @@
name: Build and Publish Docker Image
on:
push:
branches: [main, master]
paths: # run only when this file changed at all
- "unshackle/core/__init__.py"
pull_request: {} # optional delete if you dont build on PRs
workflow_dispatch: {} # manual override
jobs:
detect-version-change:
runs-on: ubuntu-latest
outputs:
changed: ${{ steps.vdiff.outputs.changed }}
version: ${{ steps.vdiff.outputs.version }}
steps:
- uses: actions/checkout@v4
with: { fetch-depth: 2 } # we need the previous commit
- name: Extract & compare version
id: vdiff
shell: bash
run: |
current=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
prev=$(git show HEAD^:unshackle/core/__init__.py \
| grep -oP '__version__ = "\K[^"]+' || echo '')
echo "version=$current" >>"$GITHUB_OUTPUT"
echo "changed=$([ "$current" != "$prev" ] && echo true || echo false)" >>"$GITHUB_OUTPUT"
echo "Current=$current Previous=$prev"
build-and-push:
needs: detect-version-change
if: needs.detect-version-change.outputs.changed == 'true' # only run when bumped
runs-on: ubuntu-latest
permissions: { contents: read, packages: write }
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Extract version from __init__.py
id: version
run: |
VERSION=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "major_minor=$(echo $VERSION | cut -d. -f1-2)" >> $GITHUB_OUTPUT
echo "major=$(echo $VERSION | cut -d. -f1)" >> $GITHUB_OUTPUT
echo "Extracted version: $VERSION"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to Container Registry
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest,enable={{is_default_branch}}
type=raw,value=v${{ steps.version.outputs.version }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.version }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.major_minor }},enable={{is_default_branch}}
type=raw,value=${{ steps.version.outputs.major }},enable={{is_default_branch}}
- name: Show planned tags
run: |
echo "Planning to create the following tags:"
echo "${{ steps.meta.outputs.tags }}"
- name: Build and push Docker image
uses: docker/build-push-action@v5
with:
context: .
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Test Docker image
if: github.event_name != 'pull_request'
run: |
docker run --rm ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest env check
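With the workflow removed (the commit note says manual builds will be done for now), a rough local equivalent of what it automated might look like the sketch below. The registry/image name is an assumption not confirmed by this diff; the version grep and the platform list are taken from the removed job above.

```bash
# Hypothetical manual build & publish, mirroring the removed workflow's steps.
# ghcr.io/unshackle-dl/unshackle is an assumed image name.
VERSION=$(grep -oP '__version__ = "\K[^"]+' unshackle/core/__init__.py)
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t "ghcr.io/unshackle-dl/unshackle:${VERSION}" \
  -t "ghcr.io/unshackle-dl/unshackle:latest" \
  --push .
```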

View File

@@ -5,7 +5,40 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [Unreleased]
+## [1.2.0] - 2025-07-30
### Added
- **Update Checker**: Automatic GitHub release version checking on startup
- Configurable update notifications via `update_checks` setting in unshackle.yaml
- Non-blocking HTTP requests with 5-second timeout for performance
- Smart semantic version comparison supporting all version formats (x.y.z, x.y, x)
- Graceful error handling for network issues and API failures
- User-friendly update notifications with current → latest version display
- Direct links to GitHub releases page for easy updates
- **HDR10+ Support**: Enhanced HDR10+ metadata processing for hybrid tracks
- HDR10+ tool binary support (`hdr10plus_tool`) added to binaries module
- HDR10+ to Dolby Vision conversion capabilities in hybrid processing
- Enhanced metadata extraction for HDR10+ content
- **Duration Fix Handling**: Added duration correction for video and hybrid tracks
- **Temporary Directory Management**: Automatic creation of temp directories for attachment downloads
### Changed
- Enhanced configuration system with new `update_checks` boolean option (defaults to true)
- Updated sample unshackle.yaml with update checker configuration documentation
- Improved console styling consistency using `bright_black` for dimmed text
- **Environment Dependency Check**: Complete overhaul with detailed categorization and status summary
- Organized dependencies by category (Core, HDR, Download, Subtitle, Player, Network)
- Enhanced status reporting with compact summary display
- Improved tool requirement tracking and missing dependency alerts
- **Hybrid Track Processing**: Significant improvements to HDR10+ and Dolby Vision handling
- Enhanced metadata extraction and processing workflows
- Better integration with HDR processing tools
### Removed
- **Docker Workflow**: Removed Docker build and publish GitHub Actions workflow for manual builds
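As a quick illustration of the comparison rule described under the Update Checker entry above (shorter versions such as `x.y` or `x` are padded with zeros before comparing), a hedged sketch follows; the actual implementation is in `unshackle/core/update_checker.py` further down this diff.

```python
# Pad both versions to equal length, then compare part by part.
def newer(current: str, latest: str) -> bool:
    c, n = ([int(x) for x in v.split(".")] for v in (current, latest))
    size = max(len(c), len(n))
    c += [0] * (size - len(c))
    n += [0] * (size - len(n))
    return n > c

print(newer("1.1.0", "1.2"))  # True  -> update available
print(newer("1.2.0", "1.2"))  # False -> already up to date
```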
## [1.1.0] - 2025-07-29

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "unshackle"
-version = "1.1.0"
+version = "1.2.0"
description = "Modular Movie, TV, and Music Archival Software."
authors = [{ name = "unshackle team" }]
requires-python = ">=3.10,<3.13"

View File

@@ -295,11 +295,41 @@ class dl:
        with console.status("Loading Key Vaults...", spinner="dots"):
            self.vaults = Vaults(self.service)
+            total_vaults = len(config.key_vaults)
+            failed_vaults = []
            for vault in config.key_vaults:
                vault_type = vault["type"]
-                del vault["type"]
-                self.vaults.load(vault_type, **vault)
-            self.log.info(f"Loaded {len(self.vaults)} Vaults")
+                vault_name = vault.get("name", vault_type)
+                vault_copy = vault.copy()
+                del vault_copy["type"]
+
+                if vault_type.lower() == "sqlite":
+                    try:
+                        self.vaults.load_critical(vault_type, **vault_copy)
+                        self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")
+                    except Exception as e:
+                        self.log.error(f"vault failure: {vault_name} ({vault_type}) - {e}")
+                        raise
+                else:
+                    # Other vaults (MySQL, HTTP, API) - soft fail
+                    if not self.vaults.load(vault_type, **vault_copy):
+                        failed_vaults.append(vault_name)
+                        self.log.debug(f"Failed to load vault: {vault_name} ({vault_type})")
+                    else:
+                        self.log.debug(f"Successfully loaded vault: {vault_name} ({vault_type})")
+
+            loaded_count = len(self.vaults)
+            if failed_vaults:
+                self.log.warning(f"Failed to load {len(failed_vaults)} vault(s): {', '.join(failed_vaults)}")
+            self.log.info(f"Loaded {loaded_count}/{total_vaults} Vaults")
+
+            # Debug: Show detailed vault status
+            if loaded_count > 0:
+                vault_names = [vault.name for vault in self.vaults]
+                self.log.debug(f"Active vaults: {', '.join(vault_names)}")
+            else:
+                self.log.debug("No vaults are currently active")

        self.proxy_providers = []
        if no_proxy:
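For context, the critical/soft-fail split above means an unshackle.yaml `key_vaults` list can mix a must-succeed local vault with best-effort remote ones. The entry names and connection fields below are purely illustrative, not taken from this diff:

```yaml
key_vaults:
  - type: SQLite            # critical: load_critical() re-raises, aborting startup
    name: local             # hypothetical
    path: vaults/local.db   # hypothetical
  - type: API               # soft fail: load() returns False and is only logged
    name: shared            # hypothetical
    uri: https://vault.example.com   # hypothetical
    token: "REDACTED"       # hypothetical
```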
@@ -403,6 +433,7 @@ class dl:
        # Check if dovi_tool is available when hybrid mode is requested
        if any(r == Video.Range.HYBRID for r in range_):
            from unshackle.core.binaries import DoviTool

            if not DoviTool:
                self.log.error("Unable to run hybrid mode: dovi_tool not detected")
                self.log.error("Please install dovi_tool from https://github.com/quietvoid/dovi_tool")
@@ -907,32 +938,59 @@ class dl:
            # Check if we're in hybrid mode
            if any(r == Video.Range.HYBRID for r in range_) and title.tracks.videos:
-                # Hybrid mode: process DV and HDR10 tracks together
+                # Hybrid mode: process DV and HDR10 tracks separately for each resolution
                self.log.info("Processing Hybrid HDR10+DV tracks...")

-                # Run the hybrid processing
-                Hybrid(title.tracks.videos, self.service)
-
-                # After hybrid processing, the output file should be in temp directory
-                hybrid_output_path = config.directories.temp / "HDR10-DV.hevc"
-
-                # Create a single mux task for the hybrid output
-                task_description = "Multiplexing Hybrid HDR10+DV"
-                task_id = progress.add_task(f"{task_description}...", total=None, start=False)
-
-                # Create tracks with the hybrid video output
-                task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments
-
-                # Create a new video track for the hybrid output
-                # Use the HDR10 track as a template but update its path
-                hdr10_track = next((v for v in title.tracks.videos if v.range == Video.Range.HDR10), None)
-                if hdr10_track:
-                    hybrid_track = deepcopy(hdr10_track)
-                    hybrid_track.path = hybrid_output_path
-                    hybrid_track.range = Video.Range.DV  # It's now a DV track
-                    task_tracks.videos = [hybrid_track]
-
-                multiplex_tasks.append((task_id, task_tracks))
+                # Group video tracks by resolution
+                resolutions_processed = set()
+                hdr10_tracks = [v for v in title.tracks.videos if v.range == Video.Range.HDR10]
+                dv_tracks = [v for v in title.tracks.videos if v.range == Video.Range.DV]
+
+                for hdr10_track in hdr10_tracks:
+                    resolution = hdr10_track.height
+                    if resolution in resolutions_processed:
+                        continue
+                    resolutions_processed.add(resolution)
+
+                    # Find matching DV track for this resolution (use the lowest DV resolution)
+                    matching_dv = min(dv_tracks, key=lambda v: v.height) if dv_tracks else None
+
+                    if matching_dv:
+                        # Create track pair for this resolution
+                        resolution_tracks = [hdr10_track, matching_dv]
+                        for track in resolution_tracks:
+                            track.needs_duration_fix = True
+
+                        # Run the hybrid processing for this resolution
+                        Hybrid(resolution_tracks, self.service)
+
+                        # Create unique output filename for this resolution
+                        hybrid_filename = f"HDR10-DV-{resolution}p.hevc"
+                        hybrid_output_path = config.directories.temp / hybrid_filename
+
+                        # The Hybrid class creates HDR10-DV.hevc, rename it for this resolution
+                        default_output = config.directories.temp / "HDR10-DV.hevc"
+                        if default_output.exists():
+                            shutil.move(str(default_output), str(hybrid_output_path))
+
+                        # Create a mux task for this resolution
+                        task_description = f"Multiplexing Hybrid HDR10+DV {resolution}p"
+                        task_id = progress.add_task(f"{task_description}...", total=None, start=False)
+
+                        # Create tracks with the hybrid video output for this resolution
+                        task_tracks = Tracks(title.tracks) + title.tracks.chapters + title.tracks.attachments
+
+                        # Create a new video track for the hybrid output
+                        hybrid_track = deepcopy(hdr10_track)
+                        hybrid_track.path = hybrid_output_path
+                        hybrid_track.range = Video.Range.DV  # It's now a DV track
+                        hybrid_track.needs_duration_fix = True
+                        task_tracks.videos = [hybrid_track]
+
+                        multiplex_tasks.append((task_id, task_tracks))
+                        console.print()
            else:
                # Normal mode: process each video track separately
                for video_track in title.tracks.videos or [None]:
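A toy walk-through of the pairing rule above, assuming a `-q 2160,1080` download where DV is only offered at 1080p (the situation the commit's hotfix mentions):

```python
# Each HDR10 resolution becomes its own hybrid output; all of them reuse the
# RPU from the lowest-resolution DV track. Values are illustrative only.
hdr10_heights = [2160, 1080]
dv_heights = [1080]
rpu_donor = min(dv_heights)                               # 1080
outputs = [f"HDR10-DV-{h}p.hevc" for h in hdr10_heights]
print(rpu_donor, outputs)  # 1080 ['HDR10-DV-2160p.hevc', 'HDR10-DV-1080p.hevc']
```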

View File

@@ -25,52 +25,127 @@ def env() -> None:
@env.command()
def check() -> None:
    """Checks environment for the required dependencies."""
-    table = Table(title="Dependencies", expand=True)
-    table.add_column("Name", no_wrap=True)
-    table.add_column("Required", justify="center")
-    table.add_column("Installed", justify="center")
-    table.add_column("Path", no_wrap=False, overflow="fold")
-
-    # Define all dependencies with their binary objects and required status
-    dependencies = [
-        {"name": "FFMpeg", "binary": binaries.FFMPEG, "required": True},
-        {"name": "FFProbe", "binary": binaries.FFProbe, "required": True},
-        {"name": "shaka-packager", "binary": binaries.ShakaPackager, "required": True},
-        {"name": "MKVToolNix", "binary": binaries.MKVToolNix, "required": True},
-        {"name": "Mkvpropedit", "binary": binaries.Mkvpropedit, "required": True},
-        {"name": "CCExtractor", "binary": binaries.CCExtractor, "required": False},
-        {"name": "FFPlay", "binary": binaries.FFPlay, "required": False},
-        {"name": "SubtitleEdit", "binary": binaries.SubtitleEdit, "required": False},
-        {"name": "Aria2(c)", "binary": binaries.Aria2, "required": False},
-        {"name": "HolaProxy", "binary": binaries.HolaProxy, "required": False},
-        {"name": "MPV", "binary": binaries.MPV, "required": False},
-        {"name": "Caddy", "binary": binaries.Caddy, "required": False},
-        {"name": "N_m3u8DL-RE", "binary": binaries.N_m3u8DL_RE, "required": False},
-        {"name": "dovi_tool", "binary": binaries.DoviTool, "required": False},
-    ]
-
-    for dep in dependencies:
-        path = dep["binary"]
-
-        # Required column
-        if dep["required"]:
-            required = "[red]Yes[/red]"
-        else:
-            required = "No"
-
-        # Installed column
-        if path:
-            installed = "[green]:heavy_check_mark:[/green]"
-            path_output = str(path)
-        else:
-            installed = "[red]:x:[/red]"
-            path_output = "Not Found"
-
-        # Add to the table
-        table.add_row(dep["name"], required, installed, path_output)
-
-    # Display the result
-    console.print(Padding(table, (1, 5)))
+    # Define all dependencies
+    all_deps = [
+        # Core Media Tools
+        {"name": "FFmpeg", "binary": binaries.FFMPEG, "required": True, "desc": "Media processing", "cat": "Core"},
+        {"name": "FFprobe", "binary": binaries.FFProbe, "required": True, "desc": "Media analysis", "cat": "Core"},
+        {"name": "MKVToolNix", "binary": binaries.MKVToolNix, "required": True, "desc": "MKV muxing", "cat": "Core"},
+        {
+            "name": "mkvpropedit",
+            "binary": binaries.Mkvpropedit,
+            "required": True,
+            "desc": "MKV metadata",
+            "cat": "Core",
+        },
+        {
+            "name": "shaka-packager",
+            "binary": binaries.ShakaPackager,
+            "required": True,
+            "desc": "DRM decryption",
+            "cat": "DRM",
+        },
+        # HDR Processing
+        {"name": "dovi_tool", "binary": binaries.DoviTool, "required": False, "desc": "Dolby Vision", "cat": "HDR"},
+        {
+            "name": "HDR10Plus_tool",
+            "binary": binaries.HDR10PlusTool,
+            "required": False,
+            "desc": "HDR10+ metadata",
+            "cat": "HDR",
+        },
+        # Downloaders
+        {"name": "aria2c", "binary": binaries.Aria2, "required": False, "desc": "Multi-thread DL", "cat": "Download"},
+        {
+            "name": "N_m3u8DL-RE",
+            "binary": binaries.N_m3u8DL_RE,
+            "required": False,
+            "desc": "HLS/DASH/ISM",
+            "cat": "Download",
+        },
+        # Subtitle Tools
+        {
+            "name": "SubtitleEdit",
+            "binary": binaries.SubtitleEdit,
+            "required": False,
+            "desc": "Sub conversion",
+            "cat": "Subtitle",
+        },
+        {
+            "name": "CCExtractor",
+            "binary": binaries.CCExtractor,
+            "required": False,
+            "desc": "CC extraction",
+            "cat": "Subtitle",
+        },
+        # Media Players
+        {"name": "FFplay", "binary": binaries.FFPlay, "required": False, "desc": "Simple player", "cat": "Player"},
+        {"name": "MPV", "binary": binaries.MPV, "required": False, "desc": "Advanced player", "cat": "Player"},
+        # Network Tools
+        {
+            "name": "HolaProxy",
+            "binary": binaries.HolaProxy,
+            "required": False,
+            "desc": "Proxy service",
+            "cat": "Network",
+        },
+        {"name": "Caddy", "binary": binaries.Caddy, "required": False, "desc": "Web server", "cat": "Network"},
+    ]
+
+    # Track overall status
+    all_required_installed = True
+    total_installed = 0
+    total_required = 0
+    missing_required = []
+
+    # Create a single table
+    table = Table(
+        title="Environment Dependencies", title_style="bold", show_header=True, header_style="bold", expand=False
+    )
+    table.add_column("Category", style="bold cyan", width=10)
+    table.add_column("Tool", width=16)
+    table.add_column("Status", justify="center", width=10)
+    table.add_column("Req", justify="center", width=4)
+    table.add_column("Purpose", style="bright_black", width=20)
+
+    last_cat = None
+    for dep in all_deps:
+        path = dep["binary"]
+
+        # Category column (only show when it changes)
+        category = dep["cat"] if dep["cat"] != last_cat else ""
+        last_cat = dep["cat"]
+
+        # Status
+        if path:
+            status = "[green]✓[/green]"
+            total_installed += 1
+        else:
+            status = "[red]✗[/red]"
+            if dep["required"]:
+                all_required_installed = False
+                missing_required.append(dep["name"])
+
+        if dep["required"]:
+            total_required += 1
+
+        # Required column (compact)
+        req = "[red]Y[/red]" if dep["required"] else "[bright_black]-[/bright_black]"
+
+        # Add row
+        table.add_row(category, dep["name"], status, req, dep["desc"])
+
+    console.print(Padding(table, (1, 2)))
+
+    # Compact summary
+    summary_parts = [f"[bold]Total:[/bold] {total_installed}/{len(all_deps)}"]
+    if all_required_installed:
+        summary_parts.append("[green]All required tools installed ✓[/green]")
+    else:
+        summary_parts.append(f"[red]Missing required: {', '.join(missing_required)}[/red]")
+
+    console.print(Padding(" ".join(summary_parts), (1, 2)))


@env.command()
@@ -86,7 +161,7 @@ def info() -> None:
tree.add(f"[repr.number]{i}.[/] [text2]{path.resolve()}[/]") tree.add(f"[repr.number]{i}.[/] [text2]{path.resolve()}[/]")
console.print(Padding(tree, (0, 5))) console.print(Padding(tree, (0, 5)))
table = Table(title="Directories", expand=True) table = Table(title="Directories", title_style="bold", expand=True)
table.add_column("Name", no_wrap=True) table.add_column("Name", no_wrap=True)
table.add_column("Path", no_wrap=False, overflow="fold") table.add_column("Path", no_wrap=False, overflow="fold")

View File

@@ -46,7 +46,8 @@ def copy(to_vault: str, from_vaults: list[str], service: Optional[str] = None) -
vault_type = vault["type"] vault_type = vault["type"]
vault_args = vault.copy() vault_args = vault.copy()
del vault_args["type"] del vault_args["type"]
vaults.load(vault_type, **vault_args) if not vaults.load(vault_type, **vault_args):
raise click.ClickException(f"Failed to load vault ({vault_name}).")
to_vault: Vault = vaults.vaults[0] to_vault: Vault = vaults.vaults[0]
from_vaults: list[Vault] = vaults.vaults[1:] from_vaults: list[Vault] = vaults.vaults[1:]

View File

@@ -1 +1 @@
__version__ = "1.1.0" __version__ = "1.2.0"

View File

@@ -15,6 +15,7 @@ from unshackle.core.commands import Commands
from unshackle.core.config import config
from unshackle.core.console import ComfyRichHandler, console
from unshackle.core.constants import context_settings
+from unshackle.core.update_checker import UpdateChecker
from unshackle.core.utilities import rotate_log_file

LOGGING_PATH = None
@@ -79,6 +80,22 @@ def main(version: bool, debug: bool, log_path: Path) -> None:
    if version:
        return

+    if config.update_checks:
+        try:
+            latest_version = UpdateChecker.check_for_updates_sync(__version__)
+            if latest_version:
+                console.print(
+                    f"\n[yellow]⚠️ Update available![/yellow] "
+                    f"Current: {__version__} → Latest: [green]{latest_version}[/green]",
+                    justify="center",
+                )
+                console.print(
+                    "Visit: https://github.com/unshackle-dl/unshackle/releases/latest\n",
+                    justify="center",
+                )
+        except Exception:
+            pass

    @atexit.register
    def save_log():

View File

@@ -52,6 +52,7 @@ N_m3u8DL_RE = find("N_m3u8DL-RE", "n-m3u8dl-re")
MKVToolNix = find("mkvmerge") MKVToolNix = find("mkvmerge")
Mkvpropedit = find("mkvpropedit") Mkvpropedit = find("mkvpropedit")
DoviTool = find("dovi_tool") DoviTool = find("dovi_tool")
HDR10PlusTool = find("hdr10plus_tool", "HDR10Plus_tool")
__all__ = ( __all__ = (
@@ -69,5 +70,6 @@ __all__ = (
"MKVToolNix", "MKVToolNix",
"Mkvpropedit", "Mkvpropedit",
"DoviTool", "DoviTool",
"HDR10PlusTool",
"find", "find",
) )
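The `find()` helper itself is not part of this diff; as a rough sketch of what a multi-candidate lookup like `find("hdr10plus_tool", "HDR10Plus_tool")` is assumed to do (first name that resolves on PATH wins, otherwise a falsy result so checks like `if not HDR10PlusTool:` work):

```python
import shutil
from pathlib import Path
from typing import Optional


def find(*names: str) -> Optional[Path]:
    """Return the first candidate binary found on PATH, else None (assumed behaviour)."""
    for name in names:
        path = shutil.which(name)
        if path:
            return Path(path)
    return None
```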

View File

@@ -78,6 +78,7 @@ class Config:
        self.set_terminal_bg: bool = kwargs.get("set_terminal_bg", False)
        self.tag: str = kwargs.get("tag") or ""
        self.tmdb_api_key: str = kwargs.get("tmdb_api_key") or ""
+        self.update_checks: bool = kwargs.get("update_checks", True)

    @classmethod
    def from_yaml(cls, path: Path) -> Config:
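The matching unshackle.yaml toggle (enabled in the full sample config later in this diff); setting it to false skips the startup request entirely:

```yaml
update_checks: false   # disable the GitHub release check on startup
```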

View File

@@ -62,6 +62,7 @@ class Attachment:
        session = session or requests.Session()

        response = session.get(url, stream=True)
        response.raise_for_status()

+        config.directories.temp.mkdir(parents=True, exist_ok=True)
        download_path.parent.mkdir(parents=True, exist_ok=True)

        with open(download_path, "wb") as f:

View File

@@ -8,7 +8,7 @@ from pathlib import Path
from rich.padding import Padding
from rich.rule import Rule

-from unshackle.core.binaries import DoviTool
+from unshackle.core.binaries import DoviTool, HDR10PlusTool
from unshackle.core.config import config
from unshackle.core.console import console
@@ -20,6 +20,7 @@ class Hybrid:
""" """
Takes the Dolby Vision and HDR10(+) streams out of the VideoTracks. Takes the Dolby Vision and HDR10(+) streams out of the VideoTracks.
It will then attempt to inject the Dolby Vision metadata layer to the HDR10(+) stream. It will then attempt to inject the Dolby Vision metadata layer to the HDR10(+) stream.
If no DV track is available but HDR10+ is present, it will convert HDR10+ to DV.
""" """
global directories global directories
from unshackle.core.tracks import Video from unshackle.core.tracks import Video
@@ -29,17 +30,35 @@ class Hybrid:
self.rpu_file = "RPU.bin" self.rpu_file = "RPU.bin"
self.hdr_type = "HDR10" self.hdr_type = "HDR10"
self.hevc_file = f"{self.hdr_type}-DV.hevc" self.hevc_file = f"{self.hdr_type}-DV.hevc"
self.hdr10plus_to_dv = False
self.hdr10plus_file = "HDR10Plus.json"
console.print(Padding(Rule("[rule.text]HDR10+DV Hybrid"), (1, 2))) # Get resolution info from HDR10 track for display
hdr10_track = next((v for v in videos if v.range == Video.Range.HDR10), None)
hdr10p_track = next((v for v in videos if v.range == Video.Range.HDR10P), None)
track_for_res = hdr10_track or hdr10p_track
self.resolution = f"{track_for_res.height}p" if track_for_res and track_for_res.height else "Unknown"
console.print(Padding(Rule(f"[rule.text]HDR10+DV Hybrid ({self.resolution})"), (1, 2)))
for video in self.videos: for video in self.videos:
if not video.path or not os.path.exists(video.path): if not video.path or not os.path.exists(video.path):
self.log.exit(f" - Video track {video.id} was not downloaded before injection.") self.log.exit(f" - Video track {video.id} was not downloaded before injection.")
if not any(video.range == Video.Range.DV for video in self.videos) or not any( # Check if we have DV track available
video.range == Video.Range.HDR10 for video in self.videos has_dv = any(video.range == Video.Range.DV for video in self.videos)
): has_hdr10 = any(video.range == Video.Range.HDR10 for video in self.videos)
self.log.exit(" - Two VideoTracks available but one of them is not DV nor HDR10(+).") has_hdr10p = any(video.range == Video.Range.HDR10P for video in self.videos)
if not has_hdr10:
self.log.exit(" - No HDR10 track available for hybrid processing.")
# If we have HDR10+ but no DV, we can convert HDR10+ to DV
if not has_dv and has_hdr10p:
self.log.info("✓ No DV track found, but HDR10+ is available. Will convert HDR10+ to DV.")
self.hdr10plus_to_dv = True
elif not has_dv:
self.log.exit(" - No DV track available and no HDR10+ to convert.")
if os.path.isfile(config.directories.temp / self.hevc_file): if os.path.isfile(config.directories.temp / self.hevc_file):
self.log.info("✓ Already Injected") self.log.info("✓ Already Injected")
@@ -53,18 +72,28 @@ class Hybrid:
            if video.range == Video.Range.HDR10:
                self.extract_stream(save_path, "HDR10")
+            elif video.range == Video.Range.HDR10P:
+                self.extract_stream(save_path, "HDR10")
+                self.hdr_type = "HDR10+"
            elif video.range == Video.Range.DV:
                self.extract_stream(save_path, "DV")
-                # self.extract_dv_stream(video, save_path)

-        self.extract_rpu([video for video in videos if video.range == Video.Range.DV][0])
-        if os.path.isfile(config.directories.temp / "RPU_UNT.bin"):
-            self.rpu_file = "RPU_UNT.bin"
-            self.level_6()
-            # Mode 3 conversion already done during extraction when not untouched
-        elif os.path.isfile(config.directories.temp / "RPU.bin"):
-            # RPU already extracted with mode 3
-            pass
+        if self.hdr10plus_to_dv:
+            # Extract HDR10+ metadata and convert to DV
+            hdr10p_video = next(v for v in videos if v.range == Video.Range.HDR10P)
+            self.extract_hdr10plus(hdr10p_video)
+            self.convert_hdr10plus_to_dv()
+        else:
+            # Regular DV extraction
+            dv_video = next(v for v in videos if v.range == Video.Range.DV)
+            self.extract_rpu(dv_video)
+            if os.path.isfile(config.directories.temp / "RPU_UNT.bin"):
+                self.rpu_file = "RPU_UNT.bin"
+                self.level_6()
+                # Mode 3 conversion already done during extraction when not untouched
+            elif os.path.isfile(config.directories.temp / "RPU.bin"):
+                # RPU already extracted with mode 3
+                pass

        self.injecting()
@@ -72,9 +101,9 @@ class Hybrid:
if self.source == ("itunes" or "appletvplus"): if self.source == ("itunes" or "appletvplus"):
Path.unlink(config.directories.temp / "hdr10.mkv") Path.unlink(config.directories.temp / "hdr10.mkv")
Path.unlink(config.directories.temp / "dv.mkv") Path.unlink(config.directories.temp / "dv.mkv")
Path.unlink(config.directories.temp / "DV.hevc") Path.unlink(config.directories.temp / "HDR10.hevc", missing_ok=True)
Path.unlink(config.directories.temp / "HDR10.hevc") Path.unlink(config.directories.temp / "DV.hevc", missing_ok=True)
Path.unlink(config.directories.temp / f"{self.rpu_file}") Path.unlink(config.directories.temp / f"{self.rpu_file}", missing_ok=True)
def ffmpeg_simple(self, save_path, output): def ffmpeg_simple(self, save_path, output):
"""Simple ffmpeg execution without progress tracking""" """Simple ffmpeg execution without progress tracking"""
@@ -106,142 +135,6 @@ class Hybrid:
self.log.error(f"x Failed extracting {type_} stream") self.log.error(f"x Failed extracting {type_} stream")
sys.exit(1) sys.exit(1)
def ffmpeg_task(self, save_path, output, task_id):
p = subprocess.Popen(
[
"ffmpeg",
"-nostdin",
"-i",
str(save_path),
"-c:v",
"copy",
str(output),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
)
self.progress.start_task(task_id)
for line in p.stderr:
if "frame=" in line:
self.progress.update(task_id, advance=0)
p.wait()
return p.returncode
def extract_hdr10_stream(self, video, save_path):
type_ = "HDR10"
if os.path.isfile(Path(config.directories.temp / f"{type_}.hevc")):
return
if self.source == "itunes" or self.source == "appletvplus":
self.log.info("+ Muxing HDR10 stream for fixing MP4 file")
subprocess.run(
[
"mkvmerge",
"-o",
Path(config.directories.temp / "hdr10.mkv"),
save_path,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
self.log.info(f"+ Extracting {type_} stream")
extract_stream = subprocess.run(
[
"ffmpeg",
"-nostdin",
"-stats",
"-i",
Path(config.directories.temp / "hdr10.mkv"),
"-c:v",
"copy",
Path(config.directories.temp / f"{type_}.hevc"),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if extract_stream.returncode:
Path.unlink(Path(config.directories.temp / f"{type_}.hevc"))
self.log.error(f"x Failed extracting {type_} stream")
sys.exit(1)
else:
extract_stream = subprocess.run(
[
"ffmpeg",
"-nostdin",
"-stats",
"-i",
save_path,
"-c:v",
"copy",
Path(config.directories.temp / f"{type_}.hevc"),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if extract_stream.returncode:
Path.unlink(Path(config.directories.temp / f"{type_}.hevc"))
self.log.error(f"x Failed extracting {type_} stream")
sys.exit(1)
def extract_dv_stream(self, video, save_path):
type_ = "DV"
if os.path.isfile(Path(config.directories.temp / f"{type_}.hevc")):
return
if self.source == "itunes" or self.source == "appletvplus":
self.log.info("+ Muxing Dolby Vision stream for fixing MP4 file")
subprocess.run(
[
"mkvmerge",
"-o",
Path(config.directories.temp / "dv.mkv"),
save_path,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
self.log.info("+ Extracting Dolby Vision stream")
extract_stream = subprocess.run(
[
"ffmpeg",
"-nostdin",
"-stats",
"-i",
Path(config.directories.temp / "dv.mkv"),
"-an",
"-c:v",
"copy",
"-f",
"hevc",
Path(config.directories.temp / "out_1.h265"),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if extract_stream.returncode:
Path.unlink(Path(config.directories.temp / f"{type_}.hevc"))
self.log.error(f"x Failed extracting {type_} stream")
sys.exit(1)
else:
extract_stream = subprocess.run(
[
"mp4demuxer",
"--input-file",
save_path,
"--output-folder",
Path(config.directories.temp),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if extract_stream.returncode:
Path.unlink(Path(config.directories.temp / f"{type_}.hevc"))
self.log.error(f"x Failed extracting {type_} stream")
sys.exit(1)
    def extract_rpu(self, video, untouched=False):
        if os.path.isfile(config.directories.temp / "RPU.bin") or os.path.isfile(
            config.directories.temp / "RPU_UNT.bin"
@@ -315,51 +208,31 @@ class Hybrid:
            # Update rpu_file to use the edited version
            self.rpu_file = "RPU_L6.bin"
def mode_3(self):
"""Convert RPU to Mode 3"""
with open(config.directories.temp / "M3.json", "w+") as mode3_file:
json.dump({"mode": 3}, mode3_file, indent=3)
if not os.path.isfile(config.directories.temp / "RPU_M3.bin"):
self.log.info("+ Converting RPU to Mode 3")
mode3 = subprocess.run(
[
str(DoviTool),
"editor",
"-i",
config.directories.temp / self.rpu_file,
"-j",
config.directories.temp / "M3.json",
"-o",
config.directories.temp / "RPU_M3.bin",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if mode3.returncode:
Path.unlink(config.directories.temp / "RPU_M3.bin")
self.log.exit("x Failed converting RPU to Mode 3")
self.rpu_file = "RPU_M3.bin"
    def injecting(self):
        if os.path.isfile(config.directories.temp / self.hevc_file):
            return

        self.log.info(f"+ Injecting Dolby Vision metadata into {self.hdr_type} stream")

+        inject_cmd = [
+            str(DoviTool),
+            "inject-rpu",
+            "-i",
+            config.directories.temp / "HDR10.hevc",
+            "--rpu-in",
+            config.directories.temp / self.rpu_file,
+        ]
+
+        # If we converted from HDR10+, optionally remove HDR10+ metadata during injection
+        # Default to removing HDR10+ metadata since we're converting to DV
+        if self.hdr10plus_to_dv:
+            inject_cmd.append("--drop-hdr10plus")
+            self.log.info(" - Removing HDR10+ metadata during injection")
+
+        inject_cmd.extend(["-o", config.directories.temp / self.hevc_file])
+
        inject = subprocess.run(
-            [
-                str(DoviTool),
-                "inject-rpu",
-                "-i",
-                config.directories.temp / f"{self.hdr_type}.hevc",
-                "--rpu-in",
-                config.directories.temp / self.rpu_file,
-                "-o",
-                config.directories.temp / self.hevc_file,
-            ],
+            inject_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
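Put together, the effective dovi_tool invocation for the HDR10+-sourced case is roughly the following (paths relative to the temp directory; every flag comes from the command list built above):

```bash
dovi_tool inject-rpu \
  -i HDR10.hevc \
  --rpu-in RPU.bin \
  --drop-hdr10plus \
  -o HDR10-DV.hevc
```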
@@ -367,3 +240,80 @@ class Hybrid:
        if inject.returncode:
            Path.unlink(config.directories.temp / self.hevc_file)
            self.log.exit("x Failed injecting Dolby Vision metadata into HDR10 stream")
def extract_hdr10plus(self, _video):
"""Extract HDR10+ metadata from the video stream"""
if os.path.isfile(config.directories.temp / self.hdr10plus_file):
return
if not HDR10PlusTool:
self.log.exit("x HDR10Plus_tool not found. Please install it to use HDR10+ to DV conversion.")
self.log.info("+ Extracting HDR10+ metadata")
# HDR10Plus_tool needs raw HEVC stream
extraction = subprocess.run(
[
str(HDR10PlusTool),
"extract",
str(config.directories.temp / "HDR10.hevc"),
"-o",
str(config.directories.temp / self.hdr10plus_file),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if extraction.returncode:
self.log.exit("x Failed extracting HDR10+ metadata")
# Check if the extracted file has content
if os.path.getsize(config.directories.temp / self.hdr10plus_file) == 0:
self.log.exit("x No HDR10+ metadata found in the stream")
def convert_hdr10plus_to_dv(self):
"""Convert HDR10+ metadata to Dolby Vision RPU"""
if os.path.isfile(config.directories.temp / "RPU.bin"):
return
self.log.info("+ Converting HDR10+ metadata to Dolby Vision")
# First create the extra metadata JSON for dovi_tool
extra_metadata = {
"cm_version": "V29",
"length": 0, # dovi_tool will figure this out
"level6": {
"max_display_mastering_luminance": 1000,
"min_display_mastering_luminance": 1,
"max_content_light_level": 0,
"max_frame_average_light_level": 0,
},
}
with open(config.directories.temp / "extra.json", "w") as f:
json.dump(extra_metadata, f, indent=2)
# Generate DV RPU from HDR10+ metadata
conversion = subprocess.run(
[
str(DoviTool),
"generate",
"-j",
str(config.directories.temp / "extra.json"),
"--hdr10plus-json",
str(config.directories.temp / self.hdr10plus_file),
"-o",
str(config.directories.temp / "RPU.bin"),
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if conversion.returncode:
self.log.exit("x Failed converting HDR10+ to Dolby Vision")
self.log.info("✓ HDR10+ successfully converted to Dolby Vision Profile 8")
# Clean up temporary files
Path.unlink(config.directories.temp / "extra.json")
Path.unlink(config.directories.temp / self.hdr10plus_file)
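So the HDR10+ fallback reduces to a two-step tool pipeline (same flags as in the two methods above, run from the temp directory):

```bash
# 1. Pull the HDR10+ dynamic metadata out of the raw HEVC stream
hdr10plus_tool extract HDR10.hevc -o HDR10Plus.json
# 2. Generate a Dolby Vision (Profile 8) RPU from it, using the level-6 values in extra.json
dovi_tool generate -j extra.json --hdr10plus-json HDR10Plus.json -o RPU.bin
```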

View File

@@ -331,21 +331,31 @@ class Tracks:
            if not vt.path or not vt.path.exists():
                raise ValueError("Video Track must be downloaded before muxing...")
            events.emit(events.Types.TRACK_MULTIPLEX, track=vt)

-            cl.extend(
-                [
-                    "--language",
-                    f"0:{vt.language}",
-                    "--default-track",
-                    f"0:{i == 0}",
-                    "--original-flag",
-                    f"0:{vt.is_original_lang}",
-                    "--compression",
-                    "0:none",  # disable extra compression
-                    "(",
-                    str(vt.path),
-                    ")",
-                ]
-            )
+            # Prepare base arguments
+            video_args = [
+                "--language",
+                f"0:{vt.language}",
+                "--default-track",
+                f"0:{i == 0}",
+                "--original-flag",
+                f"0:{vt.is_original_lang}",
+                "--compression",
+                "0:none",  # disable extra compression
+            ]
+
+            # Add FPS fix if needed (typically for hybrid mode to prevent sync issues)
+            if hasattr(vt, "needs_duration_fix") and vt.needs_duration_fix and vt.fps:
+                video_args.extend(
+                    [
+                        "--default-duration",
+                        f"0:{vt.fps}fps" if isinstance(vt.fps, str) else f"0:{vt.fps:.3f}fps",
+                        "--fix-bitstream-timing-information",
+                        "0:1",
+                    ]
+                )
+
+            cl.extend(video_args + ["(", str(vt.path), ")"])

        for i, at in enumerate(self.audio):
            if not at.path or not at.path.exists():
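For a track flagged with needs_duration_fix, the extra arguments appended to the mkvmerge command line therefore look like this (fps value illustrative):

```bash
--default-duration 0:23.976fps --fix-bitstream-timing-information 0:1
```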

View File

@@ -237,6 +237,8 @@ class Video(Track):
        except Exception as e:
            raise ValueError("Expected fps to be a number, float, or a string as numerator/denominator form, " + str(e))

+        self.needs_duration_fix = False

    def __str__(self) -> str:
        return " | ".join(
            filter(

View File

@@ -0,0 +1,106 @@
from __future__ import annotations
import asyncio
from typing import Optional
import requests
class UpdateChecker:
"""Check for available updates from the GitHub repository."""
REPO_URL = "https://api.github.com/repos/unshackle-dl/unshackle/releases/latest"
TIMEOUT = 5
@staticmethod
def _compare_versions(current: str, latest: str) -> bool:
"""
Simple semantic version comparison.
Args:
current: Current version string (e.g., "1.1.0")
latest: Latest version string (e.g., "1.2.0")
Returns:
True if latest > current, False otherwise
"""
try:
current_parts = [int(x) for x in current.split(".")]
latest_parts = [int(x) for x in latest.split(".")]
max_length = max(len(current_parts), len(latest_parts))
current_parts.extend([0] * (max_length - len(current_parts)))
latest_parts.extend([0] * (max_length - len(latest_parts)))
for current_part, latest_part in zip(current_parts, latest_parts):
if latest_part > current_part:
return True
elif latest_part < current_part:
return False
return False
except (ValueError, AttributeError):
return False
@classmethod
async def check_for_updates(cls, current_version: str) -> Optional[str]:
"""
Check if there's a newer version available on GitHub.
Args:
current_version: The current version string (e.g., "1.1.0")
Returns:
The latest version string if an update is available, None otherwise
"""
try:
loop = asyncio.get_event_loop()
response = await loop.run_in_executor(None, lambda: requests.get(cls.REPO_URL, timeout=cls.TIMEOUT))
if response.status_code != 200:
return None
data = response.json()
latest_version = data.get("tag_name", "").lstrip("v")
if not latest_version:
return None
if cls._compare_versions(current_version, latest_version):
return latest_version
except Exception:
pass
return None
@classmethod
def check_for_updates_sync(cls, current_version: str) -> Optional[str]:
"""
Synchronous version of update check.
Args:
current_version: The current version string (e.g., "1.1.0")
Returns:
The latest version string if an update is available, None otherwise
"""
try:
response = requests.get(cls.REPO_URL, timeout=cls.TIMEOUT)
if response.status_code != 200:
return None
data = response.json()
latest_version = data.get("tag_name", "").lstrip("v")
if not latest_version:
return None
if cls._compare_versions(current_version, latest_version):
return latest_version
except Exception:
pass
return None
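Usage sketch for the class above, mirroring what main.py now does at startup (version strings illustrative):

```python
from unshackle.core.update_checker import UpdateChecker

latest = UpdateChecker.check_for_updates_sync("1.1.0")
if latest:
    print(f"Update available: 1.1.0 -> {latest}")

# From async code the non-blocking variant can be awaited instead:
#     latest = await UpdateChecker.check_for_updates(__version__)
```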

View File

@@ -25,8 +25,20 @@ class Vaults:
def __len__(self) -> int: def __len__(self) -> int:
return len(self.vaults) return len(self.vaults)
def load(self, type_: str, **kwargs: Any) -> None: def load(self, type_: str, **kwargs: Any) -> bool:
"""Load a Vault into the vaults list.""" """Load a Vault into the vaults list. Returns True if successful, False otherwise."""
module = _MODULES.get(type_)
if not module:
raise ValueError(f"Unable to find vault command by the name '{type_}'.")
try:
vault = module(**kwargs)
self.vaults.append(vault)
return True
except Exception:
return False
def load_critical(self, type_: str, **kwargs: Any) -> None:
"""Load a critical Vault that must succeed or raise an exception."""
module = _MODULES.get(type_) module = _MODULES.get(type_)
if not module: if not module:
raise ValueError(f"Unable to find vault command by the name '{type_}'.") raise ValueError(f"Unable to find vault command by the name '{type_}'.")

View File

@@ -4,6 +4,9 @@ tag: user_tag
# Set terminal background color (custom option not in CONFIG.md)
set_terminal_bg: false

+# Check for updates from GitHub repository on startup (default: true)
+update_checks: true

# Muxing configuration
muxing:
  set_title: false
@@ -129,6 +132,15 @@ filenames:
# API key for The Movie Database (TMDB)
tmdb_api_key: ""

+# conversion_method:
+#   - auto (default): Smart routing - subby for WebVTT/SAMI, standard for others
+#   - subby: Always use subby with advanced processing
+#   - pycaption: Use only pycaption library (no SubtitleEdit, no subby)
+#   - subtitleedit: Prefer SubtitleEdit when available, fall back to pycaption
+subtitle:
+  conversion_method: auto
+  sdh_method: auto

# Configuration for pywidevine's serve functionality
serve:
  users:

uv.lock (generated)
View File

@@ -1505,7 +1505,7 @@ wheels = [
[[package]]
name = "unshackle"
-version = "1.1.0"
+version = "1.2.0"
source = { editable = "." }
dependencies = [
    { name = "appdirs" },