Compare commits
No commits in common. "main" and "v0.4.1" have entirely different histories.
22 changed files with 71 additions and 424 deletions
|
@ -1,5 +1,5 @@
|
||||||
[bumpversion]
|
[bumpversion]
|
||||||
current_version = 0.4.6
|
current_version = 0.4.1
|
||||||
commit = True
|
commit = True
|
||||||
tag = True
|
tag = True
|
||||||
|
|
||||||
|
|
|
@ -44,8 +44,8 @@ steps:
|
||||||
# commands:
|
# commands:
|
||||||
# - buildah login -u $DOCKER_USER -p $DOCKER_PASS -- $DOCKER_REGISTRY
|
# - buildah login -u $DOCKER_USER -p $DOCKER_PASS -- $DOCKER_REGISTRY
|
||||||
# - buildah manifest create ucast
|
# - buildah manifest create ucast
|
||||||
# - buildah bud --tag code.thetadev.de/hsa/ucast:latest --manifest ucast --arch amd64 --build-arg TARGETPLATFORM=linux/amd64 -f deploy/Dockerfile .
|
# - buildah bud --tag code.thetadev.de/hsa/ucast:latest --manifest ucast --arch amd64 -f deploy/Dockerfile .
|
||||||
# - buildah bud --tag code.thetadev.de/hsa/ucast:latest --manifest ucast --arch arm64 --build-arg TARGETPLATFORM=linux/arm64 -f deploy/Dockerfile .
|
# - buildah bud --tag code.thetadev.de/hsa/ucast:latest --manifest ucast --arch arm64 -f deploy/Dockerfile .
|
||||||
# - buildah manifest push --all ucast docker://code.thetadev.de/hsa/ucast:latest
|
# - buildah manifest push --all ucast docker://code.thetadev.de/hsa/ucast:latest
|
||||||
# environment:
|
# environment:
|
||||||
# DOCKER_REGISTRY:
|
# DOCKER_REGISTRY:
|
||||||
|
|
|
@ -3,7 +3,6 @@ services:
|
||||||
ucast:
|
ucast:
|
||||||
image: thetadev256/ucast
|
image: thetadev256/ucast
|
||||||
user: 1000:1000
|
user: 1000:1000
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
ports:
|
||||||
- "8001:8001"
|
- "8001:8001"
|
||||||
volumes:
|
volumes:
|
||||||
|
@ -11,11 +10,7 @@ services:
|
||||||
environment:
|
environment:
|
||||||
UCAST_REDIS_URL: "redis://redis:6379"
|
UCAST_REDIS_URL: "redis://redis:6379"
|
||||||
UCAST_SECRET_KEY: "django-insecure-Es/+plApGxNBy8+ewB+74zMlmfV2H3whw6gu7i0ESwGrEWAUYRP3HM2EX0PLr3UJ"
|
UCAST_SECRET_KEY: "django-insecure-Es/+plApGxNBy8+ewB+74zMlmfV2H3whw6gu7i0ESwGrEWAUYRP3HM2EX0PLr3UJ"
|
||||||
UCAST_ALLOWED_HOSTS: ".localhost,127.0.0.1"
|
|
||||||
UCAST_N_WORKERS: 2
|
|
||||||
UCAST_TZ: "Europe/Berlin"
|
|
||||||
|
|
||||||
redis:
|
redis:
|
||||||
container_name: redis
|
container_name: redis
|
||||||
image: redis:alpine
|
image: redis:alpine
|
||||||
restart: unless-stopped
|
|
||||||
|
|
|
@ -1,245 +1,4 @@
|
||||||
Einleitung
|
Einleitung
|
||||||
##########
|
##########
|
||||||
|
|
||||||
Bei den meisten YouTube-Videos, die ich mir anschaue, handelt es sich um
|
Hello World
|
||||||
Nachrichten oder Kommentarvideos. Da diese Videos sehr textlastig sind,
|
|
||||||
spiele ich sie oft im Hintergrund ab und arbeite währenddessen an meinen Projekten.
|
|
||||||
|
|
||||||
Unterwegs habe ich aber keine Möglichkeit, YouTube-Videos im Hintergrund
|
|
||||||
abzuspielen, da die YouTube-App im Hintergrund die Wiedergabe unterbricht.
|
|
||||||
Es ist zwar möglich, YouTube-Videos mit entsprechenden Webdiensten herunterzuladen,
|
|
||||||
dies ist aber relativ unkomfortabel.
|
|
||||||
|
|
||||||
Deshalb höre ich unterwegs häufiger Podcasts, die mit entsprechenden Apps
|
|
||||||
(ich benutze AntennaPod) sowohl gestreamt als auch offline aufs Handy geladen werden
|
|
||||||
können.
|
|
||||||
|
|
||||||
Ich habe dann überlegt, ob es möglich wäre, YouTube-Kanäle automatisch in Podcasts
|
|
||||||
umzuwandeln. So kam ich auf die Idee, einen Server zu entwickeln,
|
|
||||||
der YouTube-Videos automatisch als MP3-Dateien herunterlädt und im Podcast-Format
|
|
||||||
bereitstellt. Auf diese Weise kann man sich die Audioinhalte von YouTube sowohl
|
|
||||||
am PC als auch unterwegs mit einer Podcast-App anhören.
|
|
||||||
|
|
||||||
Technik
|
|
||||||
#######
|
|
||||||
|
|
||||||
Webframework
|
|
||||||
************
|
|
||||||
|
|
||||||
Ich habe ucast mit dem Webframework Django entwickelt. Django hat den Vorteil,
|
|
||||||
dass es grundlegende Funktionen von Webanwendungen wie ein Login-System bereits
|
|
||||||
implementiert hat. Dadurch konnte ich mich schneller auf die eigentlichen Features
|
|
||||||
meiner Anwendung konzentrieren.
|
|
||||||
|
|
||||||
|
|
||||||
YouTube-Downloading
|
|
||||||
*******************
|
|
||||||
|
|
||||||
Zum Herunterladen von Videos wird die Python-Library
|
|
||||||
`yt-dlp <https://github.com/yt-dlp/yt-dlp>`_ verwendet.
|
|
||||||
Diese Library kann Videos von YouTube und diversen anderen Videoplattformen
|
|
||||||
herunterladen und mithilfe von ffmpeg ins MP3-Format konvertieren.
|
|
||||||
|
|
||||||
Yt-dlp benötigt den Link oder die YouTube-ID eines Videos, um es herunterladen zu können.
|
|
||||||
Deswegen wird zusätzlich eine Möglichkeit benötigt, die aktuellen Videos eines
|
|
||||||
Kanals und dessen Metadaten (Profilbild, Beschreibung) abzurufen.
|
|
||||||
|
|
||||||
Hierfür gibt es zwei Möglichkeiten:
|
|
||||||
erstens Scraping der YouTube-Webseite und zweitens YouTube's eigene RSS-Feeds.
|
|
||||||
|
|
||||||
YouTube stellt für jeden Kanal einen RSS-Feed unter der Adresse
|
|
||||||
``https://www.youtube.com/feeds/videos.xml?channel_id=<Kanal-ID>`` bereit.
|
|
||||||
Der Feed listet allerdings nur die letzten 15 Videos eines Kanals auf.
|
|
||||||
Um ältere Videos sowie die Metadaten eines Kanals abrufen
|
|
||||||
zu können, muss die YouTube-Webseite aufgerufen und geparst werden. Hierfür habe ich
|
|
||||||
die ``scrapetube``-Library als Grundlage verwendet und um eine Methode zum Abrufen
|
|
||||||
von Kanalinformationen erweitert.
|
|
||||||
|
|
||||||
|
|
||||||
Task-Queue
|
|
||||||
**********
|
|
||||||
|
|
||||||
Ucast muss regelmäßig die abonnierten Kanäle abrufen und Videos herunterladen.
|
|
||||||
Hier kommt eine `Task-Queue <https://python-rq.org>`_
|
|
||||||
zum Einsatz. Die Webanwendung kann neue Tasks in die
|
|
||||||
Queue einreihen, die dann im Hintergrund von Workern ausgeführt werden.
|
|
||||||
Mit einem Scheduler ist es auch möglich, periodisch (bspw. alle 15 Minuten)
|
|
||||||
Tasks auszuführen.
|
|
||||||
|
|
||||||
Die Queue benötigt eine Möglichkeit, Daten zwischen der Anwendung und den Workern
|
|
||||||
auszutauschen. Hier kommt eine Redis-Datenbank zum Einsatz.
|
|
||||||
|
|
||||||
|
|
||||||
Frontend
|
|
||||||
********
|
|
||||||
|
|
||||||
Da Ucast keine komplexen Funktionen auf der Clientseite bereitstellen muss,
|
|
||||||
wird das Frontend mithilfe von Django-Templates serverseitig gerendert und es
|
|
||||||
wurde auf ein Frontend-Framework verzichtet. Als CSS-Framework habe ich Bulma
|
|
||||||
verwendet, was eine Bibliothek von Komponenten bereitstellt. Bulma ist in Sass
|
|
||||||
geschrieben, wodurch es einfach an ein gewünschtes Designthema angepasst werden kann.
|
|
||||||
|
|
||||||
Komplett auf Javascript verzichtet habe ich jedoch nicht.
|
|
||||||
Beispielsweise habe ich ``clipboard.js`` verwendet, um die Feed-URLs mit Klick auf einen
|
|
||||||
Button kopieren zu können.
|
|
||||||
|
|
||||||
Das endlose Scrolling auf den Videoseiten habe ich mit ``htmx`` umgesetzt, einer
|
|
||||||
JS-Library, mit der man dynamisch Webinhalte nachladen kann, ohne dafür eigenen
|
|
||||||
JS-Code zu schreiben.
|
|
||||||
|
|
||||||
|
|
||||||
Inbetriebnahme
|
|
||||||
##############
|
|
||||||
|
|
||||||
Docker-Compose
|
|
||||||
**************
|
|
||||||
|
|
||||||
Ucast ist als Docker-Image mit dem Namen
|
|
||||||
`thetadev256/ucast <https://hub.docker.com/r/thetadev256/ucast>`_ verfügbar.
|
|
||||||
Eine docker-compose-Datei mit einer Basiskonfiguration befindet sich im
|
|
||||||
Projektordner unter ``deploy/docker-compose.yml``. Um Ucast zu starten, müssen
|
|
||||||
die folgenden Befehle ausgeführt werden.
|
|
||||||
|
|
||||||
.. code-block:: sh
|
|
||||||
|
|
||||||
mkdir _run # Arbeitsverzeichnis erstellen
|
|
||||||
docker-compose -f deploy/docker-compose.yml up -d # Anwendung starten
|
|
||||||
docker exec -it ucast-ucast-1 ucast-manage createsuperuser # Benutzerkonto anlegen
|
|
||||||
|
|
||||||
Die Weboberfläche ist unter http://127.0.0.1:8001 erreichbar.
|
|
||||||
|
|
||||||
Konfiguration
|
|
||||||
*************
|
|
||||||
|
|
||||||
Die Konfiguration erfolgt durch Umgebungsvariablen. Alle Umgebungsvariablen
|
|
||||||
sind mit dem Präfix ``UCAST_`` zu versehen (z.B. ``UCAST_DEBUG``).
|
|
||||||
|
|
||||||
**DEBUG**
|
|
||||||
`Debug-Modus <https://docs.djangoproject.com/en/4.0/ref/settings/#debug>`_ von Django aktivieren.
|
|
||||||
Standard: ``false``
|
|
||||||
|
|
||||||
**ALLOWED_HOSTS**
|
|
||||||
Erlaubte `Hosts/Domains <https://docs.djangoproject.com/en/4.0/ref/settings/#allowed-hosts>`_.
|
|
||||||
Beispiel: ``"ucast.thetadev.de"``
|
|
||||||
|
|
||||||
**DB_ENGINE**
|
|
||||||
Verwendete Datenbanksoftware (``sqlite`` / ``mysql`` / ``postgresql``).
|
|
||||||
Standard: ``sqlite``
|
|
||||||
|
|
||||||
**DB_NAME**
|
|
||||||
Name der Datenbank. Standard: ``db``
|
|
||||||
|
|
||||||
**DB_HOST**
|
|
||||||
Adresse der Datenbank. Standard: ``127.0.0.1``
|
|
||||||
|
|
||||||
**DB_PORT**
|
|
||||||
Port der Datenbank. Standard: 3306 (mysql), 5432 (postgresql)
|
|
||||||
|
|
||||||
**DB_USER**, **DB_PASS**
|
|
||||||
Benutzername/Passwort für die Datenbank
|
|
||||||
|
|
||||||
**WORKDIR**
|
|
||||||
Hauptverzeichnis für Ucast (Siehe Verzeichnisstruktur).
|
|
||||||
Standard: aktuelles Arbeitsverzeichnis
|
|
||||||
|
|
||||||
**STATIC_ROOT**
|
|
||||||
Ordner für statische Dateien (``WORKDIR/static``)
|
|
||||||
|
|
||||||
**DOWNLOAD_ROOT**
|
|
||||||
Ordner für heruntergeladene Bilder und Audiodateien (``WORKDIR/data``)
|
|
||||||
|
|
||||||
**CACHE_ROOT**
|
|
||||||
Ordner für temporäre Dateien (``{WORKDIR}/cache``)
|
|
||||||
|
|
||||||
**DB_DIR**
|
|
||||||
Ordner für die SQLite-Datenbankdatei (``{WORKDIR}/db``)
|
|
||||||
|
|
||||||
**TZ**
|
|
||||||
Zeitzone. Standard: Systemeinstellung
|
|
||||||
|
|
||||||
**REDIS_URL**
|
|
||||||
Redis-Adresse. Standard: ``redis://localhost:6379``
|
|
||||||
|
|
||||||
**REDIS_QUEUE_TIMEOUT**
|
|
||||||
Timeout für gestartete Jobs [s]. Standard: 600
|
|
||||||
|
|
||||||
**REDIS_QUEUE_RESULT_TTL**
|
|
||||||
Speicherdauer für abgeschlossene Tasks [s]. Standard: 600
|
|
||||||
|
|
||||||
**YT_UPDATE_INTERVAL**
|
|
||||||
Zeitabstand, in dem die YouTube-Kanäle abgerufen werden [s].
|
|
||||||
Standard: 900
|
|
||||||
|
|
||||||
**FEED_MAX_ITEMS**
|
|
||||||
Maximale Anzahl Videos, die in den Feeds enthalten sind.
|
|
||||||
Standard: 50
|
|
||||||
|
|
||||||
**N_WORKERS**
|
|
||||||
Anzahl an Worker-Prozessen, die gestartet werden sollen
|
|
||||||
(nur im Docker-Container verfügbar).
|
|
||||||
Standard: 1
|
|
||||||
|
|
||||||
|
|
||||||
Verzeichnisstruktur
|
|
||||||
*******************
|
|
||||||
|
|
||||||
Ucast erstellt in seinem Arbeitsverzeichnis vier Unterordner, in denen die
|
|
||||||
Daten der Anwendung abgelegt werden.
|
|
||||||
|
|
||||||
.. code-block:: txt
|
|
||||||
|
|
||||||
- workdir
|
|
||||||
|_ cache Temporäre Dateien
|
|
||||||
|_ data Heruntergeladene Medien
|
|
||||||
|_ db SQLite-Datenbank
|
|
||||||
|_ static Statische Websitedaten
|
|
||||||
|
|
||||||
|
|
||||||
Bedienung
|
|
||||||
#########
|
|
||||||
|
|
||||||
Nach dem Login kommt man auf die Übersichtsseite, auf der alle abonnierten
|
|
||||||
Kanäle aufgelistet werden. Um einen neuen Kanal zu abonnieren, muss die YouTube-URL
|
|
||||||
(z.B. https://youtube.com/channel/UCGiJh0NZ52wRhYKYnuZI08Q)
|
|
||||||
in das Eingabefeld kopiert werden.
|
|
||||||
|
|
||||||
Wurde ein neuer Kanal hinzugefügt, beginnt ucast damit, die neuesten 15 Videos
|
|
||||||
herunterzuladen. Um zu überprüfen, welche Videos momentan heruntergeladen werden,
|
|
||||||
kann man auf die *Downloads*-Seite gehen. Auf dieser Seite werden auch fehlgeschlagene
|
|
||||||
Downloadtasks aufgelistet, die auch manuell wiederholt werden können (bspw. nach einem
|
|
||||||
Ausfall der Internetverbindung). Es gibt auch eine Suchfunktion, mit der man nach
|
|
||||||
einem Video mit einem bestimmten Titel suchen kann.
|
|
||||||
|
|
||||||
Um die abonnierten Kanäle zu seinem Podcast-Client hinzuzufügen, kann man die
|
|
||||||
Feed-URL auf der Übersichtsseite einfach kopieren und einfügen.
|
|
||||||
|
|
||||||
Die meisten Podcast-Clients bieten zudem eine Funktion zum Import von OPML-Dateien an.
|
|
||||||
In diesem Fall kann man einfach auf den Link *Download OPML* unten auf der Seite
|
|
||||||
klicken und die heruntergeladene Datei importieren. Auf diese Weise hat man schnell
|
|
||||||
alle abonnierten Kanäle zu seinem Podcast-Client hinzugefügt.
|
|
||||||
|
|
||||||
|
|
||||||
Fazit
|
|
||||||
#####
|
|
||||||
|
|
||||||
Ich betreibe Ucast seit einer Woche auf meiner NAS
|
|
||||||
und verwende es, um mir Videos sowohl am Rechner als auch unterwegs anzuhören.
|
|
||||||
|
|
||||||
In den ersten Tagen habe ich noch einige Bugs festgestellt, die beseitigt werden
|
|
||||||
mussten. Beispielsweise liegen nicht alle YouTube-Thumbnails im 16:9-Format vor,
|
|
||||||
weswegen sie zugeschnitten werden müssen, um das Layout der Webseite nicht zu
|
|
||||||
verschieben.
|
|
||||||
|
|
||||||
Am Anfang habe ich geplant, `SponsorBlock <https://sponsor.ajay.app>`_ in Ucast
|
|
||||||
zu integrieren, um Werbeinhalte aus den Videos zu entfernen. Yt-dlp hat dieses
|
|
||||||
Feature bereits integriert. Allerdings basiert Sponsorblock auf einer von der
|
|
||||||
Community verwalteten Datenbank, d.h. je nach Beliebtheit des Videos dauert es
|
|
||||||
zwischen einer halben und mehreren Stunden nach Release, bis Markierungen verfügbar
|
|
||||||
sind. Damit Sponsorblock zuverlässig funktioniert, müsste Ucast regelmäßig nach dem
|
|
||||||
Release des Videos die Datenbank abfragen und das Video bei Änderungen erneut
|
|
||||||
herunterladen und zuschneiden. Dies war mir zunächst zu komplex und ich habe mich
|
|
||||||
dazu entschieden, das Feature erst in Zukunft umzusetzen.
|
|
||||||
|
|
||||||
Ein weiteres Feature, das ich in Zukunft umsetzen werde,
|
|
||||||
ist die Unterstützung von alternativen Videoplattformen wie Peertube,
|
|
||||||
Odysee und Bitchute.
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
[tool.poetry]
|
[tool.poetry]
|
||||||
name = "ucast"
|
name = "ucast"
|
||||||
version = "0.4.6"
|
version = "0.4.1"
|
||||||
description = "YouTube to Podcast converter"
|
description = "YouTube to Podcast converter"
|
||||||
authors = ["Theta-Dev <t.testboy@gmail.com>"]
|
authors = ["Theta-Dev <t.testboy@gmail.com>"]
|
||||||
packages = [
|
packages = [
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
__version__ = "0.4.6"
|
__version__ = "0.4.1"
|
||||||
|
|
||||||
|
|
||||||
def template_context(request):
|
def template_context(request):
|
||||||
|
|
|
@ -70,16 +70,6 @@ class Channel(models.Model):
|
||||||
"download_size__sum"
|
"download_size__sum"
|
||||||
)
|
)
|
||||||
|
|
||||||
def vfilter_args(self) -> dict:
|
|
||||||
filter_args = {}
|
|
||||||
if self.skip_livestreams:
|
|
||||||
filter_args["is_livestream"] = False
|
|
||||||
|
|
||||||
if self.skip_shorts:
|
|
||||||
filter_args["is_short"] = False
|
|
||||||
|
|
||||||
return filter_args
|
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return self.name
|
return self.name
|
||||||
|
|
||||||
|
|
|
@ -2,7 +2,6 @@ import redis
|
||||||
import rq
|
import rq
|
||||||
import rq_scheduler
|
import rq_scheduler
|
||||||
from django.conf import settings
|
from django.conf import settings
|
||||||
from django.db.models import ObjectDoesNotExist
|
|
||||||
from rq import registry
|
from rq import registry
|
||||||
|
|
||||||
from ucast.models import Video
|
from ucast.models import Video
|
||||||
|
@ -95,21 +94,15 @@ def get_failed_job_registry():
|
||||||
|
|
||||||
def get_downloading_videos(offset=0, limit=-1):
|
def get_downloading_videos(offset=0, limit=-1):
|
||||||
queue = get_queue()
|
queue = get_queue()
|
||||||
v_ids = set()
|
videos = {}
|
||||||
|
|
||||||
for job in queue.get_jobs(offset, limit):
|
for job in queue.get_jobs(offset, limit):
|
||||||
if (
|
if (
|
||||||
job.func_name == "ucast.tasks.download.download_video"
|
job.func_name == "ucast.tasks.download.download_video"
|
||||||
and job.args
|
and job.args
|
||||||
and job.args[0] > 0
|
and isinstance(job.args[0], Video)
|
||||||
):
|
):
|
||||||
v_ids.add(job.args[0])
|
video = job.args[0]
|
||||||
|
videos[video.id] = video
|
||||||
|
|
||||||
videos = []
|
return list(videos.values())
|
||||||
for v_id in v_ids:
|
|
||||||
try:
|
|
||||||
videos.append(Video.objects.get(id=v_id))
|
|
||||||
except ObjectDoesNotExist:
|
|
||||||
pass
|
|
||||||
|
|
||||||
return videos
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
import shutil
|
import shutil
|
||||||
|
|
||||||
from ucast.models import Channel, Video
|
from ucast.models import Channel, Video
|
||||||
from ucast.service import storage, util, videoutil, youtube
|
from ucast.service import storage, util, youtube
|
||||||
|
|
||||||
|
|
||||||
class ChannelAlreadyExistsException(Exception):
|
class ChannelAlreadyExistsException(Exception):
|
||||||
|
@ -12,10 +12,8 @@ class ChannelAlreadyExistsException(Exception):
|
||||||
def download_channel_avatar(channel: Channel):
|
def download_channel_avatar(channel: Channel):
|
||||||
store = storage.Storage()
|
store = storage.Storage()
|
||||||
channel_folder = store.get_or_create_channel_folder(channel.slug)
|
channel_folder = store.get_or_create_channel_folder(channel.slug)
|
||||||
util.download_image_file(
|
util.download_image_file(channel.avatar_url, channel_folder.file_avatar)
|
||||||
channel.avatar_url, channel_folder.file_avatar, videoutil.AVATAR_SIZE
|
util.resize_avatar(channel_folder.file_avatar, channel_folder.file_avatar_sm)
|
||||||
)
|
|
||||||
videoutil.resize_avatar(channel_folder.file_avatar, channel_folder.file_avatar_sm)
|
|
||||||
|
|
||||||
|
|
||||||
def create_channel(channel_str: str) -> Channel:
|
def create_channel(channel_str: str) -> Channel:
|
||||||
|
|
|
@ -85,12 +85,8 @@ class Cache:
|
||||||
if dirname == "yt_dlp":
|
if dirname == "yt_dlp":
|
||||||
continue
|
continue
|
||||||
|
|
||||||
try:
|
ctime = os.path.getctime(dirname)
|
||||||
ctime = os.path.getctime(dirname)
|
|
||||||
# Cache folders may get removed by concurrent jobs
|
|
||||||
except FileNotFoundError:
|
|
||||||
continue
|
|
||||||
age = datetime.now() - datetime.fromtimestamp(ctime)
|
age = datetime.now() - datetime.fromtimestamp(ctime)
|
||||||
|
|
||||||
if age > timedelta(days=1):
|
if age > timedelta(days=1):
|
||||||
shutil.rmtree(self.dir_cache / dirname, ignore_errors=True)
|
shutil.rmtree(self.dir_cache / dirname)
|
||||||
|
|
|
@ -4,7 +4,7 @@ import json
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, Optional, Tuple, Union
|
from typing import Any, Union
|
||||||
from urllib import parse
|
from urllib import parse
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
|
@ -12,6 +12,9 @@ import slugify
|
||||||
from django.utils import timezone
|
from django.utils import timezone
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
|
AVATAR_SM_WIDTH = 100
|
||||||
|
THUMBNAIL_SM_WIDTH = 360
|
||||||
|
|
||||||
EMOJI_PATTERN = re.compile(
|
EMOJI_PATTERN = re.compile(
|
||||||
"["
|
"["
|
||||||
"\U0001F1E0-\U0001F1FF" # flags (iOS)
|
"\U0001F1E0-\U0001F1FF" # flags (iOS)
|
||||||
|
@ -36,38 +39,13 @@ def download_file(url: str, download_path: Path):
|
||||||
open(download_path, "wb").write(r.content)
|
open(download_path, "wb").write(r.content)
|
||||||
|
|
||||||
|
|
||||||
def resize_image(img: Image, resize: Tuple[int, int]):
|
def download_image_file(url: str, download_path: Path):
|
||||||
if img.size == resize:
|
|
||||||
return img
|
|
||||||
|
|
||||||
w_ratio = resize[0] / img.width
|
|
||||||
h_ratio = resize[1] / img.height
|
|
||||||
box = None
|
|
||||||
|
|
||||||
# Too tall
|
|
||||||
if h_ratio < w_ratio:
|
|
||||||
crop_height = int(img.width / resize[0] * resize[1])
|
|
||||||
border = int((img.height - crop_height) / 2)
|
|
||||||
box = (0, border, img.width, img.height - border)
|
|
||||||
# Too wide
|
|
||||||
elif w_ratio < h_ratio:
|
|
||||||
crop_width = int(img.height / resize[1] * resize[0])
|
|
||||||
border = int((img.width - crop_width) / 2)
|
|
||||||
box = (border, 0, img.width - border, img.height)
|
|
||||||
|
|
||||||
return img.resize(resize, Image.Resampling.LANCZOS, box)
|
|
||||||
|
|
||||||
|
|
||||||
def download_image_file(
|
|
||||||
url: str, download_path: Path, resize: Optional[Tuple[int, int]] = None
|
|
||||||
):
|
|
||||||
"""
|
"""
|
||||||
Download an image and convert it to the type given
|
Download an image and convert it to the type given
|
||||||
by the path.
|
by the path.
|
||||||
|
|
||||||
:param url: Image URL
|
:param url: Image URL
|
||||||
:param download_path: Download path
|
:param download_path: Download path
|
||||||
:param resize: target image size (set to None for no resizing)
|
|
||||||
"""
|
"""
|
||||||
r = requests.get(url, allow_redirects=True)
|
r = requests.get(url, allow_redirects=True)
|
||||||
r.raise_for_status()
|
r.raise_for_status()
|
||||||
|
@ -77,16 +55,30 @@ def download_image_file(
|
||||||
if img_ext == "jpeg":
|
if img_ext == "jpeg":
|
||||||
img_ext = "jpg"
|
img_ext = "jpg"
|
||||||
|
|
||||||
do_resize = resize and img.size != resize
|
if "." + img_ext == download_path.suffix:
|
||||||
if do_resize:
|
|
||||||
img = resize_image(img, resize)
|
|
||||||
|
|
||||||
if not do_resize and "." + img_ext == download_path.suffix:
|
|
||||||
open(download_path, "wb").write(r.content)
|
open(download_path, "wb").write(r.content)
|
||||||
else:
|
else:
|
||||||
img.save(download_path)
|
img.save(download_path)
|
||||||
|
|
||||||
|
|
||||||
|
def resize_avatar(original_file: Path, new_file: Path):
|
||||||
|
avatar = Image.open(original_file)
|
||||||
|
avatar_new_height = int(AVATAR_SM_WIDTH / avatar.width * avatar.height)
|
||||||
|
avatar = avatar.resize(
|
||||||
|
(AVATAR_SM_WIDTH, avatar_new_height), Image.Resampling.LANCZOS
|
||||||
|
)
|
||||||
|
avatar.save(new_file)
|
||||||
|
|
||||||
|
|
||||||
|
def resize_thumbnail(original_file: Path, new_file: Path):
|
||||||
|
thumbnail = Image.open(original_file)
|
||||||
|
tn_new_height = int(THUMBNAIL_SM_WIDTH / thumbnail.width * thumbnail.height)
|
||||||
|
thumbnail = thumbnail.resize(
|
||||||
|
(THUMBNAIL_SM_WIDTH, tn_new_height), Image.Resampling.LANCZOS
|
||||||
|
)
|
||||||
|
thumbnail.save(new_file)
|
||||||
|
|
||||||
|
|
||||||
def get_slug(text: str) -> str:
|
def get_slug(text: str) -> str:
|
||||||
return slugify.slugify(text, lowercase=False, separator="_")
|
return slugify.slugify(text, lowercase=False, separator="_")
|
||||||
|
|
||||||
|
|
|
@ -2,12 +2,6 @@ from datetime import date
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
|
|
||||||
from mutagen import id3
|
from mutagen import id3
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
AVATAR_SM_WIDTH = 100
|
|
||||||
THUMBNAIL_SM_WIDTH = 360
|
|
||||||
THUMBNAIL_SIZE = (1280, 720)
|
|
||||||
AVATAR_SIZE = (900, 900)
|
|
||||||
|
|
||||||
|
|
||||||
def tag_audio(
|
def tag_audio(
|
||||||
|
@ -32,21 +26,3 @@ def tag_audio(
|
||||||
encoding=3, mime="image/png", type=3, desc="Cover", data=albumart.read()
|
encoding=3, mime="image/png", type=3, desc="Cover", data=albumart.read()
|
||||||
)
|
)
|
||||||
tag.save()
|
tag.save()
|
||||||
|
|
||||||
|
|
||||||
def resize_avatar(original_file: Path, new_file: Path):
|
|
||||||
avatar = Image.open(original_file)
|
|
||||||
avatar_new_height = int(AVATAR_SM_WIDTH / avatar.width * avatar.height)
|
|
||||||
avatar = avatar.resize(
|
|
||||||
(AVATAR_SM_WIDTH, avatar_new_height), Image.Resampling.LANCZOS
|
|
||||||
)
|
|
||||||
avatar.save(new_file)
|
|
||||||
|
|
||||||
|
|
||||||
def resize_thumbnail(original_file: Path, new_file: Path):
|
|
||||||
thumbnail = Image.open(original_file)
|
|
||||||
tn_new_height = int(THUMBNAIL_SM_WIDTH / thumbnail.width * thumbnail.height)
|
|
||||||
thumbnail = thumbnail.resize(
|
|
||||||
(THUMBNAIL_SM_WIDTH, tn_new_height), Image.Resampling.LANCZOS
|
|
||||||
)
|
|
||||||
thumbnail.save(new_file)
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ import feedparser
|
||||||
import requests
|
import requests
|
||||||
from yt_dlp import YoutubeDL
|
from yt_dlp import YoutubeDL
|
||||||
|
|
||||||
from ucast.service import scrapetube, storage, util, videoutil
|
from ucast.service import scrapetube, storage, util
|
||||||
|
|
||||||
CHANID_REGEX = re.compile(r"""[-_a-zA-Z\d]{24}""")
|
CHANID_REGEX = re.compile(r"""[-_a-zA-Z\d]{24}""")
|
||||||
|
|
||||||
|
@ -115,7 +115,7 @@ def download_thumbnail(vinfo: VideoDetails, download_path: Path):
|
||||||
logging.info(f"downloading thumbnail {url}...")
|
logging.info(f"downloading thumbnail {url}...")
|
||||||
|
|
||||||
try:
|
try:
|
||||||
util.download_image_file(url, download_path, videoutil.THUMBNAIL_SIZE)
|
util.download_image_file(url, download_path)
|
||||||
return
|
return
|
||||||
except requests.HTTPError:
|
except requests.HTTPError:
|
||||||
logging.warning(f"downloading thumbnail {url} failed")
|
logging.warning(f"downloading thumbnail {url} failed")
|
||||||
|
|
|
@ -7,7 +7,7 @@ from yt_dlp.utils import DownloadError
|
||||||
|
|
||||||
from ucast import queue
|
from ucast import queue
|
||||||
from ucast.models import Channel, Video
|
from ucast.models import Channel, Video
|
||||||
from ucast.service import controller, cover, storage, videoutil, youtube
|
from ucast.service import controller, cover, storage, util, videoutil, youtube
|
||||||
|
|
||||||
|
|
||||||
def _load_scraped_video(vid: youtube.VideoScraped, channel: Channel):
|
def _load_scraped_video(vid: youtube.VideoScraped, channel: Channel):
|
||||||
|
@ -105,7 +105,7 @@ def download_video(v_id: int):
|
||||||
# Download/convert thumbnails
|
# Download/convert thumbnails
|
||||||
tn_path = channel_folder.get_thumbnail(video.slug)
|
tn_path = channel_folder.get_thumbnail(video.slug)
|
||||||
youtube.download_thumbnail(details, tn_path)
|
youtube.download_thumbnail(details, tn_path)
|
||||||
videoutil.resize_thumbnail(tn_path, channel_folder.get_thumbnail(video.slug, True))
|
util.resize_thumbnail(tn_path, channel_folder.get_thumbnail(video.slug, True))
|
||||||
cover_file = channel_folder.get_cover(video.slug)
|
cover_file = channel_folder.get_cover(video.slug)
|
||||||
|
|
||||||
if not os.path.isfile(channel_folder.file_avatar):
|
if not os.path.isfile(channel_folder.file_avatar):
|
||||||
|
|
|
@ -2,7 +2,6 @@ import os
|
||||||
|
|
||||||
from django.db.models import ObjectDoesNotExist
|
from django.db.models import ObjectDoesNotExist
|
||||||
from django.utils import timezone
|
from django.utils import timezone
|
||||||
from PIL import Image
|
|
||||||
|
|
||||||
from ucast import queue
|
from ucast import queue
|
||||||
from ucast.models import Channel, Video
|
from ucast.models import Channel, Video
|
||||||
|
@ -52,32 +51,6 @@ def recreate_covers():
|
||||||
queue.enqueue(recreate_cover, video.id)
|
queue.enqueue(recreate_cover, video.id)
|
||||||
|
|
||||||
|
|
||||||
def resize_thumbnail(v_id: int):
|
|
||||||
try:
|
|
||||||
video = Video.objects.get(id=v_id)
|
|
||||||
except ObjectDoesNotExist:
|
|
||||||
return
|
|
||||||
|
|
||||||
store = storage.Storage()
|
|
||||||
cf = store.get_channel_folder(video.channel.slug)
|
|
||||||
|
|
||||||
tn_path = cf.get_thumbnail(video.slug)
|
|
||||||
tn_img = Image.open(tn_path)
|
|
||||||
if tn_img.size != videoutil.THUMBNAIL_SIZE:
|
|
||||||
tn_img = util.resize_image(tn_img, videoutil.THUMBNAIL_SIZE)
|
|
||||||
tn_img.save(tn_path)
|
|
||||||
videoutil.resize_thumbnail(tn_path, cf.get_thumbnail(video.slug, True))
|
|
||||||
|
|
||||||
|
|
||||||
def resize_thumbnails():
|
|
||||||
"""
|
|
||||||
Used to unify thumbnail sizes for the existing collection before v0.4.2.
|
|
||||||
Needs to be triggered manually: ``manage.py rqenqueue ucast.tasks.library.resize_thumbnails``.
|
|
||||||
"""
|
|
||||||
for video in Video.objects.filter(downloaded__isnull=False):
|
|
||||||
queue.enqueue(resize_thumbnail, video.id)
|
|
||||||
|
|
||||||
|
|
||||||
def update_file_storage():
|
def update_file_storage():
|
||||||
store = storage.Storage()
|
store = storage.Storage()
|
||||||
|
|
||||||
|
@ -102,7 +75,7 @@ def update_file_storage():
|
||||||
return
|
return
|
||||||
|
|
||||||
if not os.path.isfile(tn_file_sm):
|
if not os.path.isfile(tn_file_sm):
|
||||||
videoutil.resize_thumbnail(tn_file, tn_file_sm)
|
util.resize_thumbnail(tn_file, tn_file_sm)
|
||||||
|
|
||||||
if not os.path.isfile(cover_file):
|
if not os.path.isfile(cover_file):
|
||||||
recreate_cover(video)
|
recreate_cover(video)
|
||||||
|
@ -128,12 +101,8 @@ def update_channel_info(ch_id: int):
|
||||||
store = storage.Storage()
|
store = storage.Storage()
|
||||||
channel_folder = store.get_or_create_channel_folder(channel.slug)
|
channel_folder = store.get_or_create_channel_folder(channel.slug)
|
||||||
|
|
||||||
util.download_image_file(
|
util.download_image_file(channel_data.avatar_url, channel_folder.file_avatar)
|
||||||
channel_data.avatar_url, channel_folder.file_avatar, videoutil.AVATAR_SIZE
|
util.resize_avatar(channel_folder.file_avatar, channel_folder.file_avatar_sm)
|
||||||
)
|
|
||||||
videoutil.resize_avatar(
|
|
||||||
channel_folder.file_avatar, channel_folder.file_avatar_sm
|
|
||||||
)
|
|
||||||
|
|
||||||
channel.avatar_url = channel_data.avatar_url
|
channel.avatar_url = channel_data.avatar_url
|
||||||
|
|
||||||
|
|
Binary file not shown.
Before Width: | Height: | Size: 196 KiB |
Binary file not shown.
Before Width: | Height: | Size: 197 KiB |
Binary file not shown.
Before Width: | Height: | Size: 199 KiB |
|
@ -48,7 +48,7 @@ def _create_download_dir() -> Tuple[Path, TemporaryDirectory]:
|
||||||
shutil.copyfile(
|
shutil.copyfile(
|
||||||
tests.DIR_TESTFILES / "avatar" / f"{avatar}.jpg", cf.file_avatar
|
tests.DIR_TESTFILES / "avatar" / f"{avatar}.jpg", cf.file_avatar
|
||||||
)
|
)
|
||||||
videoutil.resize_avatar(cf.file_avatar, cf.file_avatar_sm)
|
util.resize_avatar(cf.file_avatar, cf.file_avatar_sm)
|
||||||
|
|
||||||
return tmpdir, tmpdir_o
|
return tmpdir, tmpdir_o
|
||||||
|
|
||||||
|
@ -75,7 +75,7 @@ def _add_download_dir_content():
|
||||||
|
|
||||||
shutil.copyfile(tests.DIR_TESTFILES / "audio" / "audio1.mp3", file_audio)
|
shutil.copyfile(tests.DIR_TESTFILES / "audio" / "audio1.mp3", file_audio)
|
||||||
shutil.copyfile(tests.DIR_TESTFILES / "thumbnail" / f"{vid}.webp", file_tn)
|
shutil.copyfile(tests.DIR_TESTFILES / "thumbnail" / f"{vid}.webp", file_tn)
|
||||||
videoutil.resize_thumbnail(file_tn, cf.get_thumbnail(video_slug, True))
|
util.resize_thumbnail(file_tn, cf.get_thumbnail(video_slug, True))
|
||||||
cover.create_cover_file(
|
cover.create_cover_file(
|
||||||
file_tn,
|
file_tn,
|
||||||
cf.file_avatar,
|
cf.file_avatar,
|
||||||
|
|
|
@ -55,22 +55,28 @@ def test_download_image_file_conv():
|
||||||
assert diff.getbbox() is None
|
assert diff.getbbox() is None
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
def test_resize_avatar():
|
||||||
"src_file",
|
tmpdir_o = tempfile.TemporaryDirectory()
|
||||||
[
|
tmpdir = Path(tmpdir_o.name)
|
||||||
"normal",
|
source_file = tests.DIR_TESTFILES / "avatar" / "a1.jpg"
|
||||||
"tall",
|
resized_file = tmpdir / "avatar.webp"
|
||||||
"wide",
|
|
||||||
],
|
|
||||||
)
|
|
||||||
def test_resize_image(src_file: str):
|
|
||||||
src_path = tests.DIR_TESTFILES / "img" / f"{src_file}.png"
|
|
||||||
src_img = Image.open(src_path)
|
|
||||||
resized = util.resize_image(src_img, (500, 250))
|
|
||||||
|
|
||||||
normal_img = Image.open(tests.DIR_TESTFILES / "img" / "normal.png")
|
util.resize_avatar(source_file, resized_file)
|
||||||
diff = ImageChops.difference(resized, normal_img)
|
|
||||||
assert diff.getbbox() is None
|
resized_avatar = Image.open(resized_file)
|
||||||
|
assert resized_avatar.size == (100, 100)
|
||||||
|
|
||||||
|
|
||||||
|
def test_resize_thumbnail():
|
||||||
|
tmpdir_o = tempfile.TemporaryDirectory()
|
||||||
|
tmpdir = Path(tmpdir_o.name)
|
||||||
|
source_file = tests.DIR_TESTFILES / "thumbnail" / "t1.webp"
|
||||||
|
resized_file = tmpdir / "thumbnail.webp"
|
||||||
|
|
||||||
|
util.resize_thumbnail(source_file, resized_file)
|
||||||
|
|
||||||
|
resized_thumbnail = Image.open(resized_file)
|
||||||
|
assert resized_thumbnail.size == (360, 202)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize(
|
@pytest.mark.parametrize(
|
||||||
|
|
|
@ -57,27 +57,3 @@ https://youtu.be/ZPxEr4YdWt8"""
|
||||||
expected_cover_img = Image.open(cover_file)
|
expected_cover_img = Image.open(cover_file)
|
||||||
diff = ImageChops.difference(tag_cover_img, expected_cover_img)
|
diff = ImageChops.difference(tag_cover_img, expected_cover_img)
|
||||||
assert diff.getbbox() is None
|
assert diff.getbbox() is None
|
||||||
|
|
||||||
|
|
||||||
def test_resize_avatar():
|
|
||||||
tmpdir_o = tempfile.TemporaryDirectory()
|
|
||||||
tmpdir = Path(tmpdir_o.name)
|
|
||||||
source_file = tests.DIR_TESTFILES / "avatar" / "a1.jpg"
|
|
||||||
resized_file = tmpdir / "avatar.webp"
|
|
||||||
|
|
||||||
videoutil.resize_avatar(source_file, resized_file)
|
|
||||||
|
|
||||||
resized_avatar = Image.open(resized_file)
|
|
||||||
assert resized_avatar.size == (100, 100)
|
|
||||||
|
|
||||||
|
|
||||||
def test_resize_thumbnail():
|
|
||||||
tmpdir_o = tempfile.TemporaryDirectory()
|
|
||||||
tmpdir = Path(tmpdir_o.name)
|
|
||||||
source_file = tests.DIR_TESTFILES / "thumbnail" / "t1.webp"
|
|
||||||
resized_file = tmpdir / "thumbnail.webp"
|
|
||||||
|
|
||||||
videoutil.resize_thumbnail(source_file, resized_file)
|
|
||||||
|
|
||||||
resized_thumbnail = Image.open(resized_file)
|
|
||||||
assert resized_thumbnail.size == (360, 202)
|
|
||||||
|
|
|
@ -92,10 +92,7 @@ def videos(request: http.HttpRequest, channel: str):
|
||||||
template_name = "ucast/videos_items.html"
|
template_name = "ucast/videos_items.html"
|
||||||
|
|
||||||
n_pending = Video.objects.filter(
|
n_pending = Video.objects.filter(
|
||||||
channel=chan,
|
channel=chan, downloaded__isnull=True, is_deleted=False
|
||||||
downloaded__isnull=True,
|
|
||||||
is_deleted=False,
|
|
||||||
**chan.vfilter_args(),
|
|
||||||
).count()
|
).count()
|
||||||
|
|
||||||
return render(
|
return render(
|
||||||
|
|
Loading…
Add table
Reference in a new issue