Commit f5bb479e by haojie

mp3测试

parent f21881b9
......@@ -21,6 +21,7 @@
"dayjs": "^1.10.6",
"default-passive-events": "^2.0.0",
"js-cookie": "^3.0.1",
"lamejs": "^1.2.1",
"tdesign-vue-next": "^0.22.1",
"uuid": "^9.0.0",
"vue": "^3.2.31",
......
......@@ -10,8 +10,13 @@ export default function () {
// 洗稿id
const currentConfuseId = ref('');
// 是否暂停洗稿
const stopConfuse = ref(false);
const openConfuseInterval = (id: any = '') => {
console.log('开启轮询,洗稿回调');
// 必须先清空定时器
closeConfuseInterval();
confuseInterval.value = window.setInterval(() => {
currentStartConfuseBack(id ? id : currentConfuseId.value);
}, 3000);
......@@ -50,9 +55,14 @@ export default function () {
task_id: id,
});
if (res.code == 0 && res.data.length) {
console.log('洗稿回调成功');
// 是否为error
if (res.data[0] === 'error') {
stopConfuse.value = true;
} else {
console.log('洗稿回调成功');
confuseList.value = res.data;
}
closeConfuseInterval();
confuseList.value = res.data;
} else {
console.log('洗稿还没有回调', id);
console.log(res.data);
......@@ -71,6 +81,7 @@ export default function () {
});
return {
stopConfuse,
confuseList,
currentConfuseId,
currentStartConfuse,
......
// 低版本浏览器兼容
import './utils/polyfills';
import { createApp } from 'vue';
// 组件库按需引入---注意--以后要加新组件,记得去文件里引入
import TDesign from './utils/Tdesign';
......@@ -10,6 +13,7 @@ import '@/style/index.less';
// 谷歌浏览器关于滚动事件警告去除
import 'default-passive-events';
import App from './App.vue';
// font
// import '@/style/font-family.css';
// 全局样式
......
......@@ -33,7 +33,7 @@ import { CONFUSE_STATUS } from '@/service/Live';
import { processTextCallback } from '@/hooks/useScript';
import { scriptTypeText } from '@/service/CreateLive';
import { writeLog } from '@/utils/pyqt';
const { currentConfuseId, confuseList, currentStartConfuse, openConfuseInterval } = useConfuse();
const { currentConfuseId, confuseList, stopConfuse, currentStartConfuse, openConfuseInterval } = useConfuse();
const { openInterval: confuseInterval } = processTextCallback();
const store = useStore();
......@@ -272,7 +272,8 @@ const currentTimeChange = (index: number, value: number) => {
typeof liveDetail.value.phonetic_timbres_id === 'number' &&
typeof liveDetail.value.tone_id === 'number' &&
liveDetail.value.is_disorganize &&
liveDetail.value.type == scriptTypeText
liveDetail.value.type == scriptTypeText &&
!stopConfuse.value
) {
console.log(row.videoIndex, '当前videoIndex');
currentVideoRow.confuse = CONFUSE_STATUS.CONFUSE_STATUS_PROGRESS;
......
......@@ -73,15 +73,8 @@ import HomeSvg from '@/assets/svg/createLive/home.svg';
import InteractSvg from '@/assets/svg/createLive/interact.svg';
import ScriptsSvg from '@/assets/svg/createLive/scripts.svg';
import { computed, onBeforeMount, ref, onBeforeUnmount, onActivated } from 'vue';
import {
getElBounding,
show_message,
DataType,
dimensionalConvert,
ecursionDeepCopy,
getDurationOfAudioFile,
getFile,
} from '@/utils/tool';
import { getElBounding, show_message, DataType, dimensionalConvert, ecursionDeepCopy, getFile } from '@/utils/tool';
import { getDurationOfAudioFile } from '@/utils/audio';
import { useStore } from 'vuex';
import { createLiveKeys, scriptTypeText, scriptTypePhonetics, mergeSameAudio, filterFiled } from '@/service/CreateLive';
import { getLiveTaskInfo, createDrafts, getDraftsDetail, liveTts, createLiveTask } from '@/utils/api/userApi';
......
......@@ -77,7 +77,7 @@ import CustomDialog from '@/components/Dialog.vue';
import CustomInput from '@/components/input/index.vue';
import routerConfig from '@/router/tool';
import { useRouter } from 'vue-router';
import { getDigitalPeopleList } from '@/service/Common';
import { getDigitalPeopleList, uploadToAly } from '@/service/Common';
import Button from '@/components/Button.vue';
import { callPyjsInWindow } from '@/utils/pyqt';
import { jumpToCreateLivePage } from '@/router/jump';
......@@ -193,15 +193,27 @@ const getList = async () => {
digitalPeopleList.loading = false;
}
};
// Temporary smoke test for the audio-merge pipeline: downloads a fixed
// set of mp3 files, merges them, and logs the resulting Blob.
const startTest = async () => {
  const testUrls = [
    'http://yunyi-live.oss-cn-hangzhou.aliyuncs.com/upload/1/2023-08-114e482462-6047-47be-a30c-b85a17c95665.mp3',
    'http://yunyi-live.oss-cn-hangzhou.aliyuncs.com/upload/1/2023-08-11efe3f1ea-b445-42e2-93b0-39a70fb7c6f5.mp3',
    'http://yunyi-live.oss-cn-hangzhou.aliyuncs.com/upload/1/2023-08-11dd2372d8-73ff-4526-bf59-b1618a3f218f.mp3',
  ];
  const merged = await audioMerge(testUrls);
  console.log(merged);
  if (merged) {
    // uploadToAly([merged]);
  } else {
    console.log('文件错误');
  }
};
onMounted(() => {
// 获取我的数字人
getList();
// let list = [
// 'http://m10.music.126.net/20230811094657/2ca708dd710fd76bfaa7176ab0a52a01/ymusic/5353/0f0f/0358/d99739615f8e5153d77042092f07fd77.mp3',
// 'http://m10.music.126.net/20230811094657/2ca708dd710fd76bfaa7176ab0a52a01/ymusic/5353/0f0f/0358/d99739615f8e5153d77042092f07fd77.mp3',
// ];
// audioMerge(list);
startTest();
});
</script>
......
......@@ -180,7 +180,6 @@ export const uploadToAly = async (fileList: File[]) => {
},
]);
};
// 每次上传前清空数组
let alyList = [];
console.log('任务数');
console.log(fileList.length);
......
import audiobufferToWav from 'audiobuffer-to-wav';
import request from '@/utils/upLoadRequest';
import { writeLog } from '@/utils/pyqt';
// import lamejs from 'lamejs';
import { getFile } from './tool';
import audioConversion from '@/worker/audioConversion.js?worker';
// Create a Web Audio context, falling back to the prefixed constructor
// for older WebKit browsers.
export const createAudioContext = () => {
  const AudioContextCtor = window.AudioContext || window.webkitAudioContext;
  return new AudioContextCtor();
};
export async function decodeAudio(blob: Blob) {
const audioContext = createAudioContext();
const audioContext = createAudioContext();
// 获取音频文件的时长
export const getDurationOfAudioFile = (file: File) => {
return new Promise((resolve, reject) => {
const fileReader = new FileReader();
fileReader.onloadend = function () {
const arrayBuffer = fileReader.result;
audioContext.decodeAudioData(
arrayBuffer,
(audioBuffer: any) => {
resolve(audioBuffer);
},
reject,
);
const audio = new Audio();
audio.src = URL.createObjectURL(file);
audio.onloadedmetadata = () => {
resolve(audio.duration);
};
fileReader.onerror = reject;
fileReader.readAsArrayBuffer(blob);
audio.onerror = () => {
reject('无法获取音频文件的时长!');
};
});
};
// Fetch the audio file at `url` and report its MIME type (e.g. 'audio/mpeg').
export const getAudioFileType = async (url: string) => {
  const blob = await getFile(url);
  return blob.type;
};
// Decode a list of audio Blobs into AudioBuffers, sequentially and in
// input order. Relies on the module-level `audioContext`.
export const audioBlobToBuffer = async (list: Blob[]) => {
  const decoded = [];
  for (const item of list) {
    // Blob -> ArrayBuffer -> decoded AudioBuffer
    const raw = await (item as any).arrayBuffer();
    decoded.push(await audioContext.decodeAudioData(raw));
  }
  return decoded;
};
// Download every file in `filePaths` in parallel and return the Blobs.
// Promise.all preserves input order in its result array, so blobList[i]
// corresponds to filePaths[i].
export const downloadAudioList = async (filePaths: string[]) => {
  const blobList = await Promise.all(filePaths.map((filePath: string) => getFile(filePath)));
  return {
    blobList: blobList,
  };
};
// Mix a list of AudioBuffer-like objects into one planar Float32Array:
// the output holds `numberOfChannels` consecutive blocks of `maxLength`
// samples, and buffers are summed sample-by-sample (overlaid, not
// concatenated).
// NOTE(review): summed samples are not clamped and may exceed [-1, 1];
// channel count is taken from the first buffer — assumes all buffers
// have at least that many channels. Confirm against callers.
function mergeBuffers(bufferList) {
  const numberOfChannels = bufferList[0].numberOfChannels;
  // The longest input determines the per-channel output length.
  const maxLength = Math.max(...bufferList.map((buffer) => buffer.length));
  const mergedBuffer = new Float32Array(numberOfChannels * maxLength);
  for (let channel = 0; channel < numberOfChannels; channel++) {
    const channelOffset = channel * maxLength;
    for (const buffer of bufferList) {
      const channelData = buffer.getChannelData(channel);
      for (let i = 0; i < buffer.length; i++) {
        mergedBuffer[channelOffset + i] += channelData[i];
      }
    }
  }
  return mergedBuffer;
}
// Encode/merge the given audio blobs to MP3 on a dedicated worker thread
// (MP3 encoding is CPU-heavy, so it is kept off the main thread).
// Resolves with the Blob posted back by the worker; rejects if the
// worker errors (the original promise could hang forever on failure).
export const mp3Worker = async (blobList: any[]) => {
  return new Promise<Blob>((resolve, reject) => {
    const worker: Worker = new audioConversion();
    worker.onmessage = function (event) {
      const blob = event.data;
      // Free the worker thread once the result arrives.
      worker.terminate();
      resolve(blob);
    };
    worker.onerror = function (err) {
      // Fail fast instead of leaving the promise pending and the worker alive.
      worker.terminate();
      reject(err);
    };
    console.log('发送消息');
    worker.postMessage({
      blobList: blobList,
    });
  });
};
// 导出为 MP3
// export function exportToMp3(bufferList) {
// // 合并音频数据
// let mergedBuffer = mergeBuffers(bufferList);
// let start = performance.now();
// // 创建 MP3 编码器
// let encoder = new lamejs.Mp3Encoder(mergedBuffer.length, 44100, 128); // 使用 44100 采样率和 128kbps 比特率
// // 存储编码后的数据
// let mp3Data = [];
// // 编码音频数据
// let blockSize = 1152; // 使用默认块大小
// let bufferLength = mergedBuffer.length;
// for (let i = 0; i < bufferLength; i += blockSize) {
// let subbuffer = mergedBuffer.subarray(i, i + blockSize);
// let mp3buf = encoder.encodeBuffer(subbuffer, blockSize);
// mp3Data.push(mp3buf);
// }
// // 获取剩余的编码数据
// let mp3buf = encoder.flush();
// mp3Data.push(mp3buf);
// let end = performance.now();
// console.log(`本次合并耗时-${end - start}毫秒`);
// // 合并所有编码数据
// return new Blob(mp3Data, { type: 'audio/mpeg' });
// }
// 合并音频文件
export async function audioMerge(filePaths) {
console.log('要合并的文件');
console.log(filePaths);
export async function audioMerge(filePaths: string[]) {
if (!filePaths.length) {
console.log('没有要合并的音频');
return;
} else {
console.log('要合并的文件');
console.log(filePaths);
}
try {
// 创建一个新的音频上下文
const audioContext = createAudioContext();
// 存储每个音频文件的解码后的音频数据
const buffers = [];
let fileType = '';
// 获取文件类型
// 使用 Promise 依次解码和添加音频数据到 buffers 数组
await Promise.all(
filePaths.map(async (filePath) => {
// const response = await request.get(filePath, { responseType: 'blob' });
// const blob = response;
// // 获取文件类型
// const fileType = blob.type;
// console.log(fileType);
// // 将blob转换为ArrayBuffer
// const arrayBuffer = await blob.arrayBuffer();
// // 将ArrayBuffer转换为Buffer
// const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
// return audioBuffer;
const response = await request.get(filePath, { responseType: 'arraybuffer' });
const arrayBuffer = response;
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
return audioBuffer;
}),
).then((decodedBuffers) => {
buffers.push(...decodedBuffers); // 将解码后的音频缓冲区按顺序添加到数组中
});
// 计算合并后的音频数据的长度
const totalDuration = buffers.reduce((accumulator, current) => accumulator + current.duration, 0);
const sampleRate = audioContext.sampleRate;
const numberOfChannels = buffers[0].numberOfChannels;
const channels = buffers[0].numberOfChannels;
// 创建一个新的音频缓冲区
const mergedBuffer = audioContext.createBuffer(
numberOfChannels,
Math.round(sampleRate * totalDuration),
sampleRate,
);
let offset = 0;
// 合并音频数据
buffers.forEach((buffer, index) => {
for (let channel = 0; channel < channels; channel++) {
const sourceData = buffer.getChannelData(channel);
const targetData = mergedBuffer.getChannelData(channel);
targetData.set(sourceData, offset);
const { blobList } = await downloadAudioList(filePaths);
// 先获取第一个音频的类型
if (blobList[0]) {
fileType = blobList[0].type.split('/')[1];
if (fileType == 'mpeg') {
fileType = 'mp3';
} else if (fileType.indexOf('wav') !== -1) {
fileType = 'wav';
}
offset += Math.round(buffer.duration * sampleRate);
});
console.log(buffers);
// 导出合并后的音频数据为 WAV 文件
const mergedData = exportBufferAsWav(mergedBuffer);
const blob = new Blob([mergedData], { type: 'audio/wav' });
console.log('合并完成', blob);
return blob;
}
console.log(fileType);
// wav
if (fileType == 'wav') {
// 获取buffer列表
const buffers = await audioBlobToBuffer(blobList);
// 计算合并后的音频数据的长度
const totalDuration = buffers.reduce((accumulator, current) => accumulator + current.duration, 0);
const sampleRate = audioContext.sampleRate;
const numberOfChannels = buffers[0].numberOfChannels;
const channels = buffers[0].numberOfChannels;
// 创建一个新的音频缓冲区
const mergedBuffer = audioContext.createBuffer(
numberOfChannels,
Math.round(sampleRate * totalDuration),
sampleRate,
);
let offset = 0;
// 合并音频数据
buffers.forEach((buffer, index) => {
for (let channel = 0; channel < channels; channel++) {
const sourceData = buffer.getChannelData(channel);
const targetData = mergedBuffer.getChannelData(channel);
targetData.set(sourceData, offset);
}
offset += Math.round(buffer.duration * sampleRate);
});
// 导出合并后的音频数据为 WAV 文件
const mergedData = exportBufferAsWav(mergedBuffer);
const blob = new Blob([mergedData], { type: 'audio/wav' });
console.log('合并完成', blob);
return blob;
} else if (fileType == 'mp3') {
// mp3--耗时操作,放入worker中
const blob = await mp3Worker(blobList);
return blob;
}
} catch (error) {
writeLog({
name: '音频合并失败',
......
// import Lame from 'lamejs/src/js/Lame';
// import Presets from 'lamejs/src/js/Presets';
// import GainAnalysis from 'lamejs/src/js/GainAnalysis';
// import QuantizePVT from 'lamejs/src/js/QuantizePVT';
// import Quantize from 'lamejs/src/js/Quantize';
// import Reservoir from 'lamejs/src/js/Reservoir';
// import Takehiro from 'lamejs/src/js/Takehiro';
// import MPEGMode from 'lamejs/src/js/MPEGMode';
// import BitStream from 'lamejs/src/js/BitStream';
// window.Lame = Lame;
// window.Presets = Presets;
// window.GainAnalysis = GainAnalysis;
// window.QuantizePVT = QuantizePVT;
// window.Quantize = Quantize;
// window.Reservoir = Reservoir;
// window.Takehiro = Takehiro;
// window.MPEGMode = MPEGMode;
// window.BitStream = BitStream;
......@@ -308,7 +308,7 @@ export const dimensionalConvert = (list: any[]) => {
};
export const getFile = (url: string) => {
return new Promise((resolve, reject) => {
return new Promise<Blob>((resolve, reject) => {
request
.get(url, {
responseType: 'blob',
......@@ -394,22 +394,6 @@ export function timeComparison() {
console.log('forEach循环执行时间:' + (end - start) + ' 毫秒');
}
// Resolve with the duration (in seconds) of an audio file, using an
// off-DOM <audio> element to read its metadata.
export const getDurationOfAudioFile = (file: File) => {
  return new Promise((resolve, reject) => {
    const audio = new Audio();
    const objectUrl = URL.createObjectURL(file);
    audio.src = objectUrl;
    audio.onloadedmetadata = () => {
      // Release the temporary object URL — the original leaked one per call.
      URL.revokeObjectURL(objectUrl);
      resolve(audio.duration);
    };
    audio.onerror = () => {
      URL.revokeObjectURL(objectUrl);
      reject('无法获取音频文件的时长!');
    };
  });
};
// 从二维数组中合并同类
export const mergedArray = (arr: any[], key: string = 'uuid', first: string = 'is_old') => {
let newList = [];
......
import lamejs from 'lamejs';
// Worker entry point: receive the blob list from the main thread, merge
// and encode it to MP3, and post the resulting Blob back. The main
// thread's mp3Worker() resolves with event.data from this postMessage —
// in the original the postMessage was commented out, so the caller's
// promise never resolved.
self.addEventListener('message', async (event) => {
  const data = event.data;
  console.log('接收到数据了', data);
  const blobList = data.blobList;
  const blob = await mergeBlobs(blobList);
  self.postMessage(blob);
});
// const startWorker = async () => {
// await exportToMp3();
// };
// Read each Blob sequentially as raw Int16 samples, feed them through a
// lamejs MP3 encoder, and resolve with one merged 'audio/mp3' Blob.
// Fix: the original defined readNext but never advanced past index 0 —
// onloadend fired after the first read and flushed/resolved immediately,
// so only the first blob was ever encoded.
// NOTE(review): Mp3Encoder.encodeBuffer expects raw PCM samples; if the
// input blobs are already-encoded mp3/wav files this does not decode
// them first — confirm the callers' input format.
function mergeBlobs(blobs) {
  return new Promise((resolve, reject) => {
    const buffers = [];
    // mono, 44.1 kHz, 128 kbps
    const encoder = new lamejs.Mp3Encoder(1, 44100, 128);
    const finish = () => {
      const tail = encoder.flush();
      if (tail.length > 0) {
        buffers.push(tail);
      }
      resolve(new Blob(buffers, { type: 'audio/mp3' }));
    };
    let index = 0;
    const reader = new FileReader();
    reader.onload = function (e) {
      // NOTE(review): assumes each blob's byte length is even (Int16) — confirm.
      const samples = new Int16Array(e.target.result);
      const chunk = encoder.encodeBuffer(samples);
      if (chunk.length > 0) {
        buffers.push(chunk);
      }
    };
    reader.onloadend = function () {
      // onloadend fires after each read (success or failure): advance to
      // the next blob, or flush and resolve once all blobs are consumed.
      index += 1;
      if (index < blobs.length) {
        reader.readAsArrayBuffer(blobs[index]);
      } else {
        finish();
      }
    };
    reader.onerror = function () {
      reject(new Error('Failed to read audio blobs.'));
    };
    if (blobs.length === 0) {
      finish();
      return;
    }
    reader.readAsArrayBuffer(blobs[0]);
  });
}
// NOTE(review): a second 'message' listener on the same worker scope — it
// runs in addition to the handler above and only logs the payload; looks
// like leftover debug code from the "mp3测试" commit.
self.addEventListener('message', (event) => {
  // Handling logic after receiving a message
  const message = event.data;
  console.log('接收到消息:', message);
});
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment