From 08e3b32866cd6ebd96cb387a71f217d68086b2a5 Mon Sep 17 00:00:00 2001
From: SrGooglo
Date: Mon, 16 Jun 2025 23:16:14 +0000
Subject: [PATCH] remove _debug

---
 .../src/pages/_debug/audiomatcher/index.jsx   | 92 -------------------
 .../src/pages/_debug/audiomatcher/worker.js   | 70 --------------
 .../src/pages/_debug/audiometadata/index.jsx  | 47 ----------
 packages/app/src/pages/_debug/loqui/index.jsx | 49 ----------
 .../_debug/videosegmentedupload/index.jsx     | 46 ----------
 5 files changed, 304 deletions(-)
 delete mode 100644 packages/app/src/pages/_debug/audiomatcher/index.jsx
 delete mode 100644 packages/app/src/pages/_debug/audiomatcher/worker.js
 delete mode 100644 packages/app/src/pages/_debug/audiometadata/index.jsx
 delete mode 100644 packages/app/src/pages/_debug/loqui/index.jsx
 delete mode 100644 packages/app/src/pages/_debug/videosegmentedupload/index.jsx

diff --git a/packages/app/src/pages/_debug/audiomatcher/index.jsx b/packages/app/src/pages/_debug/audiomatcher/index.jsx
deleted file mode 100644
index cad9acbc..00000000
--- a/packages/app/src/pages/_debug/audiomatcher/index.jsx
+++ /dev/null
@@ -1,92 +0,0 @@
-import React, { useState, useEffect, useRef } from "react";
-import Hls from "hls.js"
-
-const exampleData = {
-    video: "https://im-fa.manifest.tidal.com/1/manifests/CAESCTE5Njg2MTQ0NCIWd05QUkh1YTIyOGRXTUVUdmFxbThQdyIWZE05ZHNYTFNkTEhaODdmTUxQMDhGQSIWS0dfYTZubHUtcTUydVZMenRyOTJwQSIWLWU1NHRpanJlNzZhSjdMcXVoQ05idyIWenRCWnZEYmpia1hvNS14UUowWFl1USIWdFRHY20ycFNpVTktaHBtVDlzUlNvdyIWdVJDMlNqMFJQYWVMSnN6NWRhRXZtdyIWZnNYUWZpNk01LUdpeUV3dE9JNTZ2dygBMAJQAQ.m3u8?token=1738270941~MjEyMTc0MTk0NTlmNjdiY2RkNjljYzc0NzU1NGRmZDcxMGJhNDI2Mg==",
-    audio: "https://sp-pr-fa.audio.tidal.com/mediatracks/CAEaKwgDEidmMmE5YjEyYTQ5ZTQ4YWFkZDdhOTY0YzBmZTdhZTY1ZV82MS5tcDQ/0.flac?token=1738270937~Y2ViYjZiNmYyZmVjN2JhNmYzN2ViMWEzOTcwNzQ3NDdkNzA5YzhhZg=="
-}
-
-function AudioSyncApp() {
-    const videoRef = useRef(null);
-    const audioRef = useRef(null);
-    const [worker, setWorker] = useState(null);
-    const [startTime, setStartTime] = useState(null);
-    const audioCtxRef = useRef(null);
-    const hlsRef = useRef(null);
-
-    // Set up HLS for the video
-    useEffect(() => {
-        if (Hls.isSupported()) {
-            const hls = new Hls({ enableWorker: false, xhrSetup: (xhr) => xhr.withCredentials = false });
-            hlsRef.current = hls;
-            hls.loadSource(exampleData.video);
-            hls.attachMedia(videoRef.current);
-        } else if (videoRef.current.canPlayType("application/vnd.apple.mpegurl")) {
-            videoRef.current.src = exampleData.video;
-        }
-
-        return () => {
-            if (hlsRef.current) hlsRef.current.destroy();
-        };
-    }, []);
-
-    // Initialize Web Audio and the worker
-    useEffect(() => {
-        audioCtxRef.current = new (window.AudioContext || window.webkitAudioContext)();
-        const newWorker = new Worker(new URL("./worker.js", import.meta.url));
-        newWorker.onmessage = (event) => {
-            setStartTime(event.data.offset);
-        };
-        setWorker(newWorker);
-
-        return () => newWorker.terminate();
-    }, []);
-
-    // Handle synchronization
-    const handleSync = async () => {
-        try {
-            // 1. Fetch the audio buffers
-            const [videoBuffer, audioBuffer] = await Promise.all([
-                fetch(exampleData.video, { mode: "cors" }).then(r => r.arrayBuffer()),
-                fetch(exampleData.audio, { mode: "cors" }).then(r => r.arrayBuffer())
-            ]);
-
-            // 2. Decode
-            const [videoAudio, songAudio] = await Promise.all([
-                audioCtxRef.current.decodeAudioData(videoBuffer),
-                audioCtxRef.current.decodeAudioData(audioBuffer)
-            ]);
-
-            // 3. Send to the worker
-            worker.postMessage(
-                { videoBuffer: videoAudio, audioBuffer: songAudio },
-                [videoAudio, songAudio]
-            );
-        } catch (error) {
-            console.error("Error de decodificación:", error);
-        }
-    };
-
-    return (
-
-
-    );
-}
-
-export default AudioSyncApp;
\ No newline at end of file
diff --git a/packages/app/src/pages/_debug/audiomatcher/worker.js b/packages/app/src/pages/_debug/audiomatcher/worker.js
deleted file mode 100644
index 5d500af4..00000000
--- a/packages/app/src/pages/_debug/audiomatcher/worker.js
+++ /dev/null
@@ -1,70 +0,0 @@
-self.onmessage = async (event) => {
-    const { videoBuffer, audioBuffer } = event.data;
-    const SAMPLE_RATE = 44100;
-
-    // Extract energy in a frequency range
-    const getEnergy = (buffer, freqRange) => {
-        const offlineCtx = new OfflineAudioContext(1, buffer.length, SAMPLE_RATE);
-        const source = offlineCtx.createBufferSource();
-        source.buffer = buffer;
-
-        const analyser = offlineCtx.createAnalyser();
-        analyser.fftSize = 4096;
-        source.connect(analyser);
-        analyser.connect(offlineCtx.destination);
-        source.start();
-
-        return offlineCtx.startRendering().then(() => {
-            const data = new Float32Array(analyser.frequencyBinCount);
-            analyser.getFloatFrequencyData(data);
-
-            const startBin = Math.floor(freqRange[0] * analyser.fftSize / SAMPLE_RATE);
-            const endBin = Math.floor(freqRange[1] * analyser.fftSize / SAMPLE_RATE);
-            return data.slice(startBin, endBin);
-        });
-    };
-
-    // Optimized cross-correlation
-    const crossCorrelate = (videoFeatures, audioFeatures) => {
-        let maxCorr = -Infinity;
-        let bestOffset = 0;
-
-        for (let i = 0; i < videoFeatures.length - audioFeatures.length; i++) {
-            let corr = 0;
-            for (let j = 0; j < audioFeatures.length; j++) {
-                corr += videoFeatures[i + j] * audioFeatures[j];
-            }
-            if (corr > maxCorr) {
-                maxCorr = corr;
-                bestOffset = i;
-            }
-        }
-        return bestOffset;
-    };
-
-    // Process the features
-    try {
-        const [videoBass, audioBass] = await Promise.all([
-            getEnergy(videoBuffer, [60, 250]), // Bass
-            getEnergy(audioBuffer, [60, 250])
-        ]);
-
-        const [videoVoice, audioVoice] = await Promise.all([
-            getEnergy(videoBuffer, [300, 3400]), // Voice
-            getEnergy(audioBuffer, [300, 3400])
-        ]);
-
-        // Combine features (dynamic weighting)
-        const isElectronic = audioVoice.reduce((a, b) => a + b) < audioBass.reduce((a, b) => a + b);
-        const weight = isElectronic ? 0.8 : 0.4;
-
-        const videoFeatures = videoBass.map((v, i) => weight * v + (1 - weight) * videoVoice[i]);
-        const audioFeatures = audioBass.map((v, i) => weight * v + (1 - weight) * audioVoice[i]);
-
-        // Compute the offset
-        const offset = crossCorrelate(videoFeatures, audioFeatures);
-        self.postMessage({ offset: offset / SAMPLE_RATE });
-    } catch (error) {
-        self.postMessage({ error: "Error en el procesamiento" });
-    }
-};
\ No newline at end of file
diff --git a/packages/app/src/pages/_debug/audiometadata/index.jsx b/packages/app/src/pages/_debug/audiometadata/index.jsx
deleted file mode 100644
index c9aea4c8..00000000
--- a/packages/app/src/pages/_debug/audiometadata/index.jsx
+++ /dev/null
@@ -1,47 +0,0 @@
-import TrackManifest from "@cores/player/classes/TrackManifest"
-
-const D_Manifest = () => {
-    const [manifest, setManifest] = React.useState(null)
-
-    function selectLocalFile() {
-        const input = document.createElement("input")
-        input.type = "file"
-        input.accept = "audio/*"
-        input.onchange = (e) => {
-            loadManifest(e.target.files[0])
-        }
-        input.click()
-    }
-
-    async function loadManifest(file) {
-        let track = new TrackManifest({ file: file })
-
-        await track.initialize()
-
-        console.log(track)
-
-        setManifest(track)
-    }
-
-    return (
-
-
-        Select a local file to view & create a track manifest
-
-        {manifest?.cover && (
-            Cover
-        )}
-
-        {JSON.stringify(manifest)}
-
-    )
-}
-
-export default D_Manifest
diff --git a/packages/app/src/pages/_debug/loqui/index.jsx b/packages/app/src/pages/_debug/loqui/index.jsx
deleted file mode 100644
index 913d974a..00000000
--- a/packages/app/src/pages/_debug/loqui/index.jsx
+++ /dev/null
@@ -1,49 +0,0 @@
-import React from "react"
-
-const defaultURL = "ws://localhost:19236"
-
-function useLoquiWs() {
-    const [socket, setSocket] = React.useState(null)
-
-    function create() {
-        const s = new WebSocket(defaultURL)
-
-        s.addEventListener("open", (event) => {
-            console.log("WebSocket connection opened")
-        })
-
-        s.addEventListener("close", (event) => {
-            console.log("WebSocket connection closed")
-        })
-
-        s.addEventListener("error", (event) => {
-            console.log("WebSocket error", event)
-        })
-
-        s.addEventListener("message", (event) => {
-            console.log("Message from server ", event.data)
-        })
-
-        setSocket(s)
-    }
-
-    React.useEffect(() => {
-        create()
-
-        return () => {
-            if (socket) {
-                socket.close()
-            }
-        }
-    }, [])
-
-    return [socket]
-}
-
-const Loqui = () => {
-    const [socket] = useLoquiWs()
-
-    return {defaultURL}
-}
-
-export default Loqui
diff --git a/packages/app/src/pages/_debug/videosegmentedupload/index.jsx b/packages/app/src/pages/_debug/videosegmentedupload/index.jsx
deleted file mode 100644
index 643e5f23..00000000
--- a/packages/app/src/pages/_debug/videosegmentedupload/index.jsx
+++ /dev/null
@@ -1,46 +0,0 @@
-import React from "react"
-import { Progress } from "antd"
-import UploadButton from "@components/UploadButton"
-
-const VideoSegmentedUpload = () => {
-    const [result, setResult] = React.useState(null)
-    const [progress, setProgress] = React.useState(null)
-
-    return (
-
-            {
-                setResult(response)
-            }}
-            onProgress={(id, progress) => {
-                setProgress({
-                    id,
-                    progress,
-                })
-            }}
-            accept={["video/*"]}
-            headers={{
-                transmux: "mq-hls",
-            }}
-        >
-            Upload video
-
-        {progress && (
-
-            Progress
-
-        )}
-
-        {result && {JSON.stringify(result, null, 2)}}
-    )
-}
-
-export default VideoSegmentedUpload