Bläddra i källkod

对话流式实现

Zhangbw 2 månader sedan
förälder
incheckning
4048b00840
4 ändrade filer med 393 tillägg och 120 borttagningar
  1. 99 15
      src/ChatDialog.vue
  2. 65 105
      src/CustomerService.vue
  3. 224 0
      src/composables/useStreamChat.js
  4. 5 0
      target/classes/META-INF/mps/autoMapper

+ 99 - 15
src/ChatDialog.vue

@@ -8,21 +8,15 @@
     </div>
 
     <div class="chat-content">
-      <div class="message ai-message">
+      <div v-for="(message, index) in messages" :key="index" :class="['message', message.type === 'ai' ? 'ai-message' : 'user-message']">
         <div class="message-text">
-          你好,我是你的腾讯云音视频 AI 助手!你可以问我:
-          <div class="quick-questions">
+          {{ message.text }}
+          <div v-if="message.hasQuickQuestions" class="quick-questions">
             <div class="question-item">😴 我最近总是失眠,有什么办法可以解决吗?</div>
             <div class="question-item">🛏️ 怎么改善晚上睡不着、早上睡不醒的问题?</div>
             <div class="question-item">🚀 超过半小时才睡着是正常的吗?</div>
           </div>
-        </div>
-      </div>
-
-      <div class="message user-message">
-        <div class="message-text">
-          你好,我是你的睡眠顾问,专门帮人解决睡眠问题的,方便聊几句吗?
-          <el-icon class="pause-icon"><VideoPlay /></el-icon>
+          <el-icon v-if="message.type === 'user' && isAiSpeaking" class="pause-icon"><VideoPlay /></el-icon>
         </div>
       </div>
     </div>
@@ -32,13 +26,15 @@
         <el-icon><ChatDotRound /></el-icon>
       </el-button>
 
-      <el-button circle class="mic-btn">
+      <el-button circle class="mic-btn" :class="{ recording: isRecording }" @click="handleMicClick">
         <el-icon><Microphone /></el-icon>
       </el-button>
 
-      <div class="input-placeholder"></div>
+      <div class="input-placeholder">
+        <span v-if="isRecording" class="recording-text">{{ currentTranscription || '正在录音...' }}</span>
+      </div>
 
-      <el-button circle class="hangup-btn" @click="$emit('close')">
+      <el-button circle class="hangup-btn" @click="handleHangup">
         <el-icon><PhoneFilled /></el-icon>
       </el-button>
     </div>
@@ -46,9 +42,74 @@
 </template>
 
 <script setup>
+import { ref, watch } from 'vue'
 import { ArrowLeft, ChatDotRound, Microphone, PhoneFilled, VideoPlay } from '@element-plus/icons-vue'
-
-defineEmits(['close'])
+import { useStreamChat } from './composables/useStreamChat'
+import { useVoiceRecognition } from './composables/useVoiceRecognition'
+
+const emit = defineEmits(['close'])
+
+const { displayText, conversationId, sendMessage, stopAudio } = useStreamChat()
+const { isRecording, currentTranscription, startRecording, stopRecording } = useVoiceRecognition()
+
+const messages = ref([
+  {
+    type: 'ai',
+    text: '你好,我是你的腾讯云音视频 AI 助手!你可以问我:',
+    hasQuickQuestions: true
+  }
+])
+
+const isAiSpeaking = ref(false)
+
+// 监听displayText变化,实时更新消息
+watch(displayText, (newText) => {
+  if (newText) {
+    const lastMessage = messages.value[messages.value.length - 1]
+    if (lastMessage && lastMessage.type === 'ai' && !lastMessage.hasQuickQuestions) {
+      lastMessage.text = newText
+    } else {
+      messages.value.push({
+        type: 'ai',
+        text: newText
+      })
+    }
+    isAiSpeaking.value = true
+  }
+})
+
+// 处理语音识别
+const handleMicClick = async () => {
+  if (isRecording.value) {
+    stopRecording()
+    if (currentTranscription.value) {
+      await handleSendMessage(currentTranscription.value)
+    }
+  } else {
+    await startRecording()
+  }
+}
+
+// 发送消息
+const handleSendMessage = async (text) => {
+  messages.value.push({
+    type: 'user',
+    text
+  })
+
+  try {
+    await sendMessage(text, null, '1', [], false)
+    isAiSpeaking.value = false
+  } catch (error) {
+    console.error('发送消息失败:', error)
+  }
+}
+
+// 挂断电话
+const handleHangup = () => {
+  stopAudio()
+  emit('close')
+}
 </script>
 
 <style scoped>
@@ -140,6 +201,29 @@ defineEmits(['close'])
   flex: 1;
   height: 40px;
   border-bottom: 2px dotted #d1d5db;
+  display: flex;
+  align-items: center;
+  padding: 0 12px;
+}
+
+.recording-text {
+  color: #ef4444;
+  font-size: 14px;
+}
+
+.mic-btn.recording {
+  background: #ef4444;
+  color: white;
+  animation: pulse 1.5s infinite;
+}
+
+@keyframes pulse {
+  0%, 100% {
+    opacity: 1;
+  }
+  50% {
+    opacity: 0.5;
+  }
 }
 
 .hangup-btn {

+ 65 - 105
src/CustomerService.vue

@@ -192,6 +192,7 @@
 import { ref, computed, watch, nextTick, onMounted, onUnmounted } from 'vue'
 import { ArrowUp, ArrowDown, Microphone, PhoneFilled, ChatDotRound, Mute } from '@element-plus/icons-vue'
 import { useVoiceRecognition } from './composables/useVoiceRecognition.js'
+import { useStreamChat } from './composables/useStreamChat.js'
 import { ElMessage } from 'element-plus'
 
 // 获取请求头(包含token)
@@ -216,6 +217,9 @@ const chatContent = ref(null)
 // 语音识别
 const { isRecording, currentTranscription, tempTranscription, startRecording, stopRecording } = useVoiceRecognition()
 
+// 流式聊天
+const { displayText, conversationId: streamConversationId, sendMessage: sendStreamMessage, stopAudio: stopStreamAudio } = useStreamChat()
+
 // 当前播放的音频对象
 const currentAudio = ref(null)
 
@@ -224,7 +228,9 @@ const currentRequestId = ref(0)
 
 // 停止当前的音频播放和输出
 const stopCurrentOutput = () => {
-  // 停止音频播放
+  // 停止流式音频播放
+  stopStreamAudio()
+  // 停止本地音频播放
   if (currentAudio.value) {
     currentAudio.value.pause()
     currentAudio.value.currentTime = 0
@@ -342,52 +348,21 @@ watch(currentTranscription, async (newVal, oldVal) => {
       })
       scrollToBottom()
 
-      // 发送到后端处理
+      // 发送到后端处理(使用流式接口)
       try {
         // 停止当前的音频播放
         stopCurrentOutput()
 
-        // 增加请求ID,标记这是最新的请求
-        currentRequestId.value++
-        const thisRequestId = currentRequestId.value
-
         const selectedAgentData = agents.value.find(a => a.id === selectedAgent.value)
-        const response = await fetch('http://localhost:8080/talk/message', {
-          method: 'POST',
-          headers: getHeaders(),
-          body: JSON.stringify({
-            message: newContent,
-            agentId: selectedAgent.value,
-            agentGender: selectedAgentData?.gender === 'male' ? '0' : '1',
-            ttsVcnList: ttsVcnList.value,
-            conversationId: currentConversationId.value,
-            requestId: thisRequestId
-          })
-        })
 
-        const data = await response.json()
-
-        // 保存 conversationId
-        if (data.conversationId) {
-          currentConversationId.value = data.conversationId
-        }
-
-        // 只有当这是最新的请求时,才显示回复和播放音频
-        if (thisRequestId === currentRequestId.value) {
-          // 添加客服回复
-          chatHistory.value.push({
-            type: 'agent',
-            content: data.reply
-          })
-          scrollToBottom()
-
-          // 播放语音
-          if (data.audio) {
-            playAudio(data.audio)
-          }
-        } else {
-          console.log('忽略旧请求的回复')
-        }
+        // 使用流式接口发送消息
+        await sendStreamMessage(
+          newContent,
+          selectedAgent.value,
+          selectedAgentData?.gender === 'male' ? '0' : '1',
+          ttsVcnList.value,
+          false
+        )
       } catch (error) {
         console.error('发送消息失败:', error)
         ElMessage.error('发送消息失败')
@@ -396,6 +371,30 @@ watch(currentTranscription, async (newVal, oldVal) => {
   }
 })
 
+// 监听流式文本变化,实时更新聊天历史
+watch(displayText, (newText) => {
+  if (newText && showChat.value) {
+    const lastMessage = chatHistory.value[chatHistory.value.length - 1]
+    if (lastMessage && lastMessage.type === 'agent' && !lastMessage.isGreeting) {
+      lastMessage.content = newText
+    } else {
+      chatHistory.value.push({
+        type: 'agent',
+        content: newText,
+        isGreeting: false
+      })
+    }
+    scrollToBottom()
+  }
+})
+
+// 同步conversationId
+watch(streamConversationId, (newId) => {
+  if (newId) {
+    currentConversationId.value = newId
+  }
+})
+
 // 监听 showChat 变化,开始对话时自动启动语音识别
 watch(showChat, async (newVal) => {
   if (newVal && !isRecording.value) {
@@ -415,7 +414,12 @@ watch(showChat, async (newVal) => {
     }
 
     // 停止音频播放
-    stopAudio()
+    stopStreamAudio()
+    if (currentAudio.value) {
+      currentAudio.value.pause()
+      currentAudio.value.currentTime = 0
+      currentAudio.value = null
+    }
 
     // 清理波形动画
     if (waveformInterval) {
@@ -510,53 +514,21 @@ const sendTextMessage = async () => {
   // 清空输入框
   textMessage.value = ''
 
-  // 发送到后端处理
+  // 发送到后端处理(使用流式接口)
   try {
     // 停止当前的音频播放
     stopCurrentOutput()
 
-    // 增加请求ID,标记这是最新的请求
-    currentRequestId.value++
-    const thisRequestId = currentRequestId.value
-
     const selectedAgentData = agents.value.find(a => a.id === selectedAgent.value)
-    const response = await fetch('http://localhost:8080/talk/message', {
-      method: 'POST',
-      headers: getHeaders(),
-      body: JSON.stringify({
-        message: content,
-        agentId: selectedAgent.value,
-        agentGender: selectedAgentData?.gender === 'male' ? '0' : '1',
-        ttsVcnList: ttsVcnList.value,
-        conversationId: currentConversationId.value,
-        requestId: thisRequestId
-      })
-    })
-
-    const data = await response.json()
 
-    // 保存 conversationId
-    if (data.conversationId) {
-      currentConversationId.value = data.conversationId
-    }
-    console.log('解析后的audio字段长度:', data.audio ? data.audio.length : 0)
-
-    // 只有当这是最新的请求时,才显示回复和播放音频
-    if (thisRequestId === currentRequestId.value) {
-      // 添加客服回复
-      chatHistory.value.push({
-        type: 'agent',
-        content: data.reply
-      })
-      scrollToBottom()
-
-      // 播放语音
-      if (data.audio) {
-        playAudio(data.audio)
-      }
-    } else {
-      console.log('忽略旧请求的回复')
-    }
+    // 使用流式接口发送消息
+    await sendStreamMessage(
+      content,
+      selectedAgent.value,
+      selectedAgentData?.gender === 'male' ? '0' : '1',
+      ttsVcnList.value,
+      false
+    )
   } catch (error) {
     console.error('发送消息失败:', error)
     ElMessage.error('发送消息失败')
@@ -752,31 +724,19 @@ const startChat = async () => {
     if (selectedAgentData.greetingMessage) {
       chatHistory.value = [{
         type: 'agent',
-        content: selectedAgentData.greetingMessage
+        content: selectedAgentData.greetingMessage,
+        isGreeting: true
       }]
 
-      // 调用后端生成欢迎语语音(isGreeting=true 标识不发送到 dify 工作流)
+      // 使用流式接口生成欢迎语语音
       try {
-        const ttsResponse = await fetch('http://localhost:8080/talk/message', {
-          method: 'POST',
-          headers: getHeaders(),
-          body: JSON.stringify({
-            message: selectedAgentData.greetingMessage,
-            agentId: selectedAgent.value,
-            isGreeting: true,
-            conversationId: currentConversationId.value
-          })
-        })
-
-        const ttsData = await ttsResponse.json()
-
-        // 保存 conversationId
-        if (ttsData.conversationId) {
-          currentConversationId.value = ttsData.conversationId
-        }
-        if (ttsData.audio) {
-          playAudio(ttsData.audio)
-        }
+        await sendStreamMessage(
+          selectedAgentData.greetingMessage,
+          selectedAgent.value,
+          null,
+          [],
+          true
+        )
       } catch (error) {
         console.error('生成欢迎语语音失败:', error)
       }

+ 224 - 0
src/composables/useStreamChat.js

@@ -0,0 +1,224 @@
import { ref } from 'vue'

/**
 * Streaming chat composable.
 *
 * POSTs a message to the backend SSE endpoint (`/talk/message/stream`),
 * incrementally accumulates the streamed reply text into `displayText`,
 * and plays the streamed base64 audio chunks gaplessly through the
 * Web Audio API. A monotonically increasing request id ensures that
 * events belonging to a superseded request are ignored.
 *
 * @returns {{displayText: import('vue').Ref<string>,
 *            conversationId: import('vue').Ref<string>,
 *            sendMessage: (message: string, agentId: *, agentGender: *, ttsVcnList: *, isGreeting?: boolean) => Promise<void>,
 *            stopAudio: () => void}}
 */
export function useStreamChat() {
  // Accumulated reply text of the current (latest) request.
  const displayText = ref('')
  // Base64 audio chunks waiting to be decoded and scheduled.
  const audioQueue = ref([])
  const isPlaying = ref(false)
  // Conversation id returned by the backend; echoed back on follow-up requests.
  const conversationId = ref('')

  // Monotonic request counter; only events matching the latest id are applied.
  let currentRequestId = 0

  // Web Audio API state (context is created lazily — browsers may require a
  // user gesture before an AudioContext can start).
  let audioContext = null
  // Timeline position where the next decoded chunk should start, enabling
  // gapless back-to-back scheduling.
  let nextStartTime = 0
  // Sources already scheduled but not yet finished, so stopAudio() can cancel them.
  let scheduledSources = []

  const initAudioContext = () => {
    if (!audioContext) {
      // window['webkitAudioContext'] covers older Safari.
      audioContext = new (window.AudioContext || window['webkitAudioContext'])()
    }
    return audioContext
  }

  // Decode a base64 string into an ArrayBuffer suitable for decodeAudioData.
  const base64ToArrayBuffer = (base64) => {
    const binaryString = atob(base64)
    const bytes = new Uint8Array(binaryString.length)
    for (let i = 0; i < binaryString.length; i++) {
      bytes[i] = binaryString.charCodeAt(i)
    }
    return bytes.buffer
  }

  // Pull the next chunk off the queue, decode it, and schedule it directly
  // after the previously scheduled chunk for seamless playback.
  const playNextAudio = async () => {
    if (audioQueue.value.length === 0) return

    const audioData = audioQueue.value.shift()

    try {
      const ctx = initAudioContext()
      const audioBuffer = await ctx.decodeAudioData(base64ToArrayBuffer(audioData))

      const source = ctx.createBufferSource()
      source.buffer = audioBuffer
      source.connect(ctx.destination)

      // Never schedule in the past; otherwise append to the running timeline.
      if (nextStartTime < ctx.currentTime) {
        nextStartTime = ctx.currentTime
      }
      source.start(nextStartTime)
      nextStartTime += audioBuffer.duration

      scheduledSources.push(source)
      source.onended = () => {
        const index = scheduledSources.indexOf(source)
        if (index > -1) {
          scheduledSources.splice(index, 1)
        }
        // All scheduled audio finished and nothing queued → playback is done.
        if (scheduledSources.length === 0 && audioQueue.value.length === 0) {
          isPlaying.value = false
        }
      }

      isPlaying.value = true

      // Eagerly decode/schedule the next queued chunk (acts as preloading).
      if (audioQueue.value.length > 0) {
        playNextAudio()
      }
    } catch (err) {
      console.error('播放音频失败:', err)
      // Skip the broken chunk and keep the queue draining.
      if (audioQueue.value.length > 0) {
        playNextAudio()
      } else if (scheduledSources.length === 0) {
        isPlaying.value = false
      }
    }
  }

  // Apply one parsed SSE event. Stale events (from superseded requests) are
  // ignored, except conversationId, which keeps the dialogue context continuous.
  const applyEvent = (event, thisRequestId) => {
    if (thisRequestId === currentRequestId) {
      if (event.name === 'text') {
        console.log(`[RequestId=${thisRequestId}] 收到文本:`, event.data)
        displayText.value += event.data
      } else if (event.name === 'audio') {
        console.log(`[RequestId=${thisRequestId}] 收到音频, 长度:`, event.data.length)
        audioQueue.value.push(event.data)
        playNextAudio()
      } else if (event.name === 'conversationId') {
        console.log(`[RequestId=${thisRequestId}] 收到conversationId:`, event.data)
        conversationId.value = event.data
      } else if (event.name === 'done') {
        console.log(`[RequestId=${thisRequestId}] 收到完成信号`)
        // Keep reading: trailing events may still arrive after 'done'.
      }
    } else {
      console.warn(`[RequestId=${thisRequestId}] 忽略旧请求的响应, 当前requestId=${currentRequestId}, 事件类型=${event.name}`)
      if (event.name === 'conversationId') {
        conversationId.value = event.data
      }
    }
  }

  // Parse one raw SSE line ("data:{...}") and dispatch the contained event.
  // "event:" lines and blank data are intentionally ignored.
  const processLine = (line, thisRequestId) => {
    if (!line.startsWith('data:')) return
    const data = line.substring(5).trim()
    if (!data) return
    try {
      applyEvent(JSON.parse(data), thisRequestId)
    } catch (e) {
      console.error('解析SSE数据失败:', e, '原始数据:', line)
    }
  }

  /**
   * Send a message and stream the reply into displayText / the audio queue.
   *
   * @param {string} message - user text to send
   * @param {*} agentId - backend agent identifier
   * @param {*} agentGender - '0' | '1' gender flag forwarded to TTS
   * @param {*} ttsVcnList - TTS voice configuration list
   * @param {boolean} [isGreeting=false] - true for greeting-only TTS requests
   * @returns {Promise<void>} resolves when the stream is fully consumed
   * @throws on network failure or a non-2xx HTTP response
   */
  const sendMessage = async (message, agentId, agentGender, ttsVcnList, isGreeting = false) => {
    // Cancel any audio still playing from the previous reply.
    stopAudio()

    // Mark this request as the newest; earlier in-flight responses become stale.
    currentRequestId++
    const thisRequestId = currentRequestId

    // Reset display state and audio queue for the new reply.
    displayText.value = ''
    audioQueue.value = []

    const token = localStorage.getItem('talk_token')
    const requestBody = {
      message,
      agentId,
      agentGender,
      ttsVcnList,
      conversationId: conversationId.value,
      isGreeting,
      requestId: thisRequestId // lets the backend correlate/cancel stale work
    }

    let response
    try {
      response = await fetch('http://localhost:8080/talk/message/stream', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': token ? `Bearer ${token}` : '',
          'clientid': 'talk-web'
        },
        body: JSON.stringify(requestBody)
      })
    } catch (err) {
      console.error('发送消息失败:', err)
      throw err
    }

    // Fail fast on HTTP errors instead of feeding an error page to the SSE parser.
    if (!response.ok || !response.body) {
      throw new Error(`HTTP ${response.status}`)
    }

    const reader = response.body.getReader()
    const decoder = new TextDecoder()
    let buffer = '' // carries a possibly incomplete trailing line between chunks

    try {
      for (;;) {
        const { done, value } = await reader.read()
        if (done) break

        const chunk = decoder.decode(value, { stream: true })
        console.log(`[RequestId=${thisRequestId}] 收到数据块, 长度:`, chunk.length)
        buffer += chunk

        // Split into complete lines; keep the unterminated remainder buffered.
        const lines = buffer.split('\n')
        buffer = lines.pop() || ''
        for (const line of lines) {
          processLine(line, thisRequestId)
        }
      }

      // Flush the decoder and process a final line that lacked a trailing
      // newline (previously such a line was silently dropped).
      buffer += decoder.decode()
      if (buffer) {
        processLine(buffer, thisRequestId)
      }

      console.log(`[RequestId=${thisRequestId}] 流读取完成`)
    } catch (err) {
      console.error(`[RequestId=${thisRequestId}] 读取流失败:`, err)
      throw err
    }
  }

  // Stop all scheduled audio, clear the queue, and invalidate in-flight requests.
  const stopAudio = () => {
    scheduledSources.forEach(source => {
      try {
        source.stop()
      } catch (e) {
        // Source may have already finished; ignore.
      }
    })
    scheduledSources = []

    audioQueue.value = []
    isPlaying.value = false

    // Reset the playback timeline for the next reply.
    nextStartTime = 0

    // Bump the request id so late-arriving events of old requests are ignored.
    currentRequestId++
  }

  return {
    displayText,
    conversationId,
    sendMessage,
    stopAudio
  }
}

+ 5 - 0
target/classes/META-INF/mps/autoMapper

@@ -1,2 +1,7 @@
+org.dromara.web.domain.vo.TenantListVo
+org.dromara.talk.domain.bo.TalkSessionBo
 org.dromara.talk.domain.vo.TalkAgentVo
+org.dromara.talk.domain.vo.TalkSessionVo
+org.dromara.talk.domain.bo.TalkAgentBo
+org.dromara.talk.domain.vo.TalkUserVo
 org.dromara.talk.domain.bo.TalkUserBo