|
|
@@ -1,38 +1,73 @@
|
|
|
import Taro from "@tarojs/taro";
|
|
|
-import { ECharacterAISTATUS } from '@/types/index'
|
|
|
-let audioCtx:Taro.WebAudioContext | AudioContext;
|
|
|
-if (process.env.TARO_ENV === 'h5') {
|
|
|
- audioCtx = new AudioContext()
|
|
|
-}else {
|
|
|
- audioCtx = Taro.createWebAudioContext()
|
|
|
+import { decode } from '@/utils'
|
|
|
+
|
|
|
+
|
|
|
+function combineArrayBuffers(arrays: ArrayBuffer[], totalLength: number): ArrayBuffer {
|
|
|
+ const result = new Uint8Array(totalLength);
|
|
|
+ let offset = 0;
|
|
|
+
|
|
|
+ for (let i = 0; i < arrays.length; i++) {
|
|
|
+ const array = new Uint8Array(arrays[i]);
|
|
|
+ result.set(array, offset);
|
|
|
+ offset += array.length;
|
|
|
+ }
|
|
|
+
|
|
|
+ return result.buffer;
|
|
|
}
|
|
|
+
|
|
|
+
|
|
|
+function combineHeaderAndChunk(header:ArrayBuffer, chunk:ArrayBuffer) {
|
|
|
+ // Create a new ArrayBuffer to hold both the header and the chunk
|
|
|
+ const combinedBuffer = new ArrayBuffer(header.byteLength + chunk.byteLength);
|
|
|
+
|
|
|
+ // Create a Uint8Array view of the combined buffer
|
|
|
+ const combinedView = new Uint8Array(combinedBuffer);
|
|
|
+
|
|
|
+ // Copy the header into the combined buffer
|
|
|
+ combinedView.set(new Uint8Array(header), 0);
|
|
|
+
|
|
|
+ // Copy the chunk data after the header
|
|
|
+ combinedView.set(new Uint8Array(chunk), header.byteLength);
|
|
|
+
|
|
|
+ return combinedBuffer;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+let audioCtx = Taro.createWebAudioContext()
|
|
|
let source: AudioBufferSourceNode;
|
|
|
let enablePlay = true; // 用于中断流式播放
|
|
|
let chunks: ArrayBuffer[] = []; // 流式播放 chunks
|
|
|
-let playStatusChangedCallback:(status: ECharacterAISTATUS)=>void;
|
|
|
let requestTask = null;
|
|
|
-export const useAudioPlayer = () => {
|
|
|
- const WAV_HEADER_LENGTH = 44
|
|
|
- let totalLength = WAV_HEADER_LENGTH;
|
|
|
+let audioBase64 = ''
|
|
|
+const WAV_HEADER_LENGTH = 44
|
|
|
|
|
|
+export const useAudioPlayer = () => {
|
|
|
+
|
|
|
+ let totalLength = 0
|
|
|
let playing = false
|
|
|
let wavHeader: ArrayBuffer;
|
|
|
-
|
|
|
- const changeStatus = (status: ECharacterAISTATUS)=> {
|
|
|
- playStatusChangedCallback && playStatusChangedCallback(status)
|
|
|
- }
|
|
|
|
|
|
- const setFistChunk = (chunk: ArrayBuffer, _requestTask?: any) => {
|
|
|
+ const setFistChunk = (base64Str: string, _requestTask?: any) => {
|
|
|
if(_requestTask){
|
|
|
requestTask = _requestTask
|
|
|
}
|
|
|
enablePlay = true;
|
|
|
- changeStatus(ECharacterAISTATUS.THINKING)
|
|
|
+ audioBase64 = base64Str;
|
|
|
+ let chunk = decode(base64Str)
|
|
|
emptyQuene();
|
|
|
+    // The first chunk contains the header info
|
|
|
wavHeader = chunk.slice(0, WAV_HEADER_LENGTH);
|
|
|
const firstChunkData = chunk.slice(WAV_HEADER_LENGTH);
|
|
|
pushChunk2Quene(firstChunkData)
|
|
|
}
|
|
|
+
|
|
|
+ const pushBase64ToQuene = (base64Str: string)=> {
|
|
|
+ audioBase64 += base64Str
|
|
|
+ let buf = decode(base64Str);
|
|
|
+ pushChunk2Quene(buf);
|
|
|
+ }
|
|
|
+
|
|
|
const pushChunk2Quene = (chunk: ArrayBuffer) => {
|
|
|
chunks.push(chunk);
|
|
|
totalLength += chunk.byteLength;
|
|
|
@@ -56,11 +91,11 @@ export const useAudioPlayer = () => {
|
|
|
return;
|
|
|
}
|
|
|
playing = true;
|
|
|
- changeStatus(ECharacterAISTATUS.RESPONDING)
|
|
|
- let tmp = [...chunks]
|
|
|
|
|
|
- const _chunk = combineArrayBuffers(tmp, totalLength)
|
|
|
- const partChunks = combineHeaderAndChunk(wavHeader, _chunk)
|
|
|
+ let tmp = [...chunks];
|
|
|
+ const _chunk = combineArrayBuffers(tmp, totalLength);
|
|
|
+ const partChunks = combineHeaderAndChunk(wavHeader, _chunk);
|
|
|
+
|
|
|
emptyQuene();
|
|
|
//@ts-ignore
|
|
|
audioCtx.decodeAudioData(partChunks, (decodedBuffer: AudioBuffer) => {
|
|
|
@@ -72,8 +107,8 @@ export const useAudioPlayer = () => {
|
|
|
source.onended = () => {
|
|
|
console.log('play end')
|
|
|
playing = false
|
|
|
- changeStatus(ECharacterAISTATUS.IDLE)
|
|
|
playChunk()
|
|
|
+ console.log('finally', audioBase64)
|
|
|
};
|
|
|
source.start(0);
|
|
|
}, (err:any) => {
|
|
|
@@ -91,85 +126,19 @@ export const useAudioPlayer = () => {
|
|
|
requestTask?.offChunkReceived?.()
|
|
|
}
|
|
|
emptyQuene();
|
|
|
- source && source.stop();
|
|
|
+
|
|
|
enablePlay = false;
|
|
|
- changeStatus(ECharacterAISTATUS.IDLE)
|
|
|
- // console.log('stop play chunk')
|
|
|
- }
|
|
|
|
|
|
- const onPlayerStatusChanged = (callback: (status: number)=>void) => {
|
|
|
- playStatusChangedCallback = callback
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+
|
|
|
return {
|
|
|
pushChunk2Quene,
|
|
|
+ pushBase64ToQuene,
|
|
|
playChunk,
|
|
|
stopPlayChunk,
|
|
|
setFistChunk,
|
|
|
- onPlayerStatusChanged,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-function combineArrayBuffers(arrays: ArrayBuffer[], totalLength: number): ArrayBuffer {
|
|
|
- const result = new Uint8Array(totalLength);
|
|
|
- let offset = 0;
|
|
|
-
|
|
|
- for (let i = 0; i < arrays.length; i++) {
|
|
|
- const array = new Uint8Array(arrays[i]);
|
|
|
- result.set(array, offset);
|
|
|
- offset += array.length;
|
|
|
- }
|
|
|
-
|
|
|
- return result.buffer;
|
|
|
-}
|
|
|
-
|
|
|
-
|
|
|
-function combineHeaderAndChunk(header:ArrayBuffer, chunk:ArrayBuffer) {
|
|
|
- // Create a new ArrayBuffer to hold both the header and the chunk
|
|
|
- const combinedBuffer = new ArrayBuffer(header.byteLength + chunk.byteLength);
|
|
|
-
|
|
|
- // Create a Uint8Array view of the combined buffer
|
|
|
- const combinedView = new Uint8Array(combinedBuffer);
|
|
|
-
|
|
|
- // Copy the header into the combined buffer
|
|
|
- combinedView.set(new Uint8Array(header), 0);
|
|
|
-
|
|
|
- // Copy the chunk data after the header
|
|
|
- combinedView.set(new Uint8Array(chunk), header.byteLength);
|
|
|
-
|
|
|
- return combinedBuffer;
|
|
|
-}
|
|
|
-
|
|
|
-// // Usage example
|
|
|
-// let storedHeader = null;
|
|
|
-// let isFirstChunk = true;
|
|
|
-
|
|
|
-// ws.onmessage = function(event) {
|
|
|
-// if (isFirstChunk) {
|
|
|
-// // Assume the first 44 bytes are the header
|
|
|
-// storedHeader = event.data.slice(0, 44);
|
|
|
-// // const headerInfo = parseWavHeader(storedHeader);
|
|
|
-// // console.log("WAV Header Info:", headerInfo);
|
|
|
-
|
|
|
-// // Handle the rest of the first chunk as audio data
|
|
|
-// const firstChunkData = event.data.slice(44);
|
|
|
-// const combinedData = combineHeaderAndChunk(storedHeader, firstChunkData);
|
|
|
-// processAudioData(combinedData);
|
|
|
-
|
|
|
-// isFirstChunk = false;
|
|
|
-// } else {
|
|
|
-// // For subsequent chunks, combine with the stored header
|
|
|
-// const combinedData = combineHeaderAndChunk(storedHeader, event.data);
|
|
|
-// processAudioData(combinedData);
|
|
|
-// }
|
|
|
-// };
|
|
|
-
|
|
|
-// function processAudioData(audioData) {
|
|
|
-// // Here you would typically send the data to the Web Audio API
|
|
|
-// // For example:
|
|
|
-// // audioContext.decodeAudioData(audioData)
|
|
|
-// // .then(decodedData => {
|
|
|
-// // // Use the decoded audio data
|
|
|
-// // })
|
|
|
-// // .catch(error => console.error("Error decoding audio data:", error));
|
|
|
-// }
|