###############################################################################
# Copyright (C) 2024 LiveTalking@lipku https://github.com/lipku/LiveTalking
# email: lipku@foxmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# app.py
from flask import Flask, render_template,send_from_directory,request, jsonify
from flask_sockets import Sockets
import base64
import json
#import gevent
#from gevent import pywsgi
#from geventwebsocket.handler import WebSocketHandler
import re
import numpy as np
from threading import Thread,Event
#import multiprocessing
import torch.multiprocessing as mp
from aiohttp import web, WSMsgType
import aiohttp
import aiohttp_cors
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.rtcrtpsender import RTCRtpSender
from webrtc import HumanPlayer
from basereal import BaseReal
from llm import llm_response
import argparse
import random
import shutil
import asyncio
import torch
from typing import Dict
from logger import logger
import gc
import weakref
import time
# Note: the server_recording_api module has been removed; its functionality has moved to other modules.
# Import the unified WebSocket management architecture
from core.app_websocket_migration import (
get_app_websocket_migration,
initialize_app_websocket_migration,
setup_app_websocket_routes,
broadcast_message_to_session,
handle_asr_audio_data,
handle_start_asr_recognition,
handle_stop_asr_recognition,
send_asr_result,
send_normal_asr_result
)
app = Flask(__name__)
#sockets = Sockets(app)
nerfreals: Dict[int, BaseReal] = {}  # sessionid -> BaseReal
# WebSocket connection management has been migrated to the unified architecture:
# websocket_connections and asr_connections are now owned by the migration layer.
# Global event-loop reference for scheduling coroutines from other threads
main_event_loop = None
opt = None
model = None
avatar = None
# WebSocket migration instance
websocket_migration = None
#####webrtc###############################
pcs = set()
# WebSocket push helpers and message handlers have been migrated to the unified
# architecture and are provided by core.app_websocket_migration.
def randN(N) -> int:
    '''Generate a random integer with N digits.'''
    lo = pow(10, N - 1)
    hi = pow(10, N)
    return random.randint(lo, hi - 1)
def build_nerfreal(sessionid:int)->BaseReal:
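    """Build the BaseReal instance for one session.

    Uses the module-level opt/model/avatar loaded at startup and lazily
    imports the backend matching opt.model. Typically called in a worker
    thread via run_in_executor (see offer()/run()).
    """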
    opt.sessionid = sessionid
logger.info('[SessionID:%d] Building %s model instance' % (sessionid, opt.model))
try:
model_start = time.time()
if opt.model == 'wav2lip':
logger.info('[SessionID:%d] Loading Wav2Lip model...' % sessionid)
from lipreal import LipReal
nerfreal = LipReal(opt,model,avatar)
elif opt.model == 'musetalk':
logger.info('[SessionID:%d] Loading MuseTalk model...' % sessionid)
from musereal import MuseReal
nerfreal = MuseReal(opt,model,avatar)
elif opt.model == 'ernerf':
logger.info('[SessionID:%d] Loading ERNeRF model...' % sessionid)
from nerfreal import NeRFReal
nerfreal = NeRFReal(opt,model,avatar)
elif opt.model == 'ultralight':
logger.info('[SessionID:%d] Loading UltraLight model...' % sessionid)
from lightreal import LightReal
nerfreal = LightReal(opt,model,avatar)
else:
raise ValueError(f"Unknown model type: {opt.model}")
model_end = time.time()
model_duration = model_end - model_start
logger.info('[SessionID:%d] %s model loaded successfully in %.3f seconds' % (sessionid, opt.model, model_duration))
        # Force garbage collection to free memory
gc.collect()
return nerfreal
    except Exception as e:
        logger.error('[SessionID:%d] Failed to build %s model: %s' % (sessionid, opt.model, str(e)))
        # Clean up any partially initialized resources
        gc.collect()
        raise
#@app.route('/offer', methods=['POST'])
async def offer(request):
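    """Handle a WebRTC offer: create a session, attach audio/video tracks,
    negotiate SDP, and return the answer together with the new sessionid."""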
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
if len(nerfreals) >= opt.max_session:
logger.info('reach max session')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": "reach max session"}
),
)
sessionid = randN(6) #len(nerfreals)
logger.info('[SessionID:%d] Starting session initialization',sessionid)
nerfreals[sessionid] = None
    # Record when model initialization starts
    model_init_start = time.time()
logger.info('[SessionID:%d] Starting model initialization for %s' % (sessionid, opt.model))
nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal,sessionid)
    # Record when model initialization completes
model_init_end = time.time()
init_duration = model_init_end - model_init_start
logger.info('[SessionID:%d] Model initialization completed in %.3f seconds' % (sessionid, init_duration))
nerfreals[sessionid] = nerfreal
pc = RTCPeerConnection()
pcs.add(pc)
    # Monitor ICE connection state changes
    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        timestamp = time.time()
logger.info("[SessionID:%d] ICE connection state changed to %s at %.3f" % (sessionid, pc.iceConnectionState, timestamp))
if pc.iceConnectionState == "checking":
logger.info("[SessionID:%d] ICE connectivity checks in progress..." % sessionid)
elif pc.iceConnectionState == "connected":
logger.info("[SessionID:%d] ICE connection established" % sessionid)
elif pc.iceConnectionState == "completed":
logger.info("[SessionID:%d] ICE connection completed" % sessionid)
elif pc.iceConnectionState == "failed":
logger.error("[SessionID:%d] ICE connection failed" % sessionid)
elif pc.iceConnectionState == "disconnected":
logger.warning("[SessionID:%d] ICE connection disconnected" % sessionid)
    # Monitor ICE candidate gathering state
    @pc.on("icegatheringstatechange")
    async def on_icegatheringstatechange():
        timestamp = time.time()
logger.info("[SessionID:%d] ICE gathering state changed to %s at %.3f" % (sessionid, pc.iceGatheringState, timestamp))
if pc.iceGatheringState == "gathering":
logger.info("[SessionID:%d] ICE candidates gathering..." % sessionid)
elif pc.iceGatheringState == "complete":
logger.info("[SessionID:%d] ICE candidates gathering completed" % sessionid)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
        timestamp = time.time()
logger.info("[SessionID:%d] Connection state changed to %s at %.3f" % (sessionid, pc.connectionState, timestamp))
if pc.connectionState == "connecting":
logger.info("[SessionID:%d] WebRTC connection establishing..." % sessionid)
elif pc.connectionState == "connected":
logger.info("[SessionID:%d] WebRTC connection established successfully" % sessionid)
elif pc.connectionState == "failed":
logger.error("[SessionID:%d] WebRTC connection failed" % sessionid)
await pc.close()
pcs.discard(pc)
if sessionid in nerfreals:
del nerfreals[sessionid]
elif pc.connectionState == "closed":
logger.info("[SessionID:%d] WebRTC connection closed" % sessionid)
pcs.discard(pc)
if sessionid in nerfreals:
del nerfreals[sessionid]
gc.collect()
    # Record when audio/video track initialization starts
track_init_start = time.time()
logger.info('[SessionID:%d] Initializing audio/video tracks' % sessionid)
player = HumanPlayer(nerfreals[sessionid])
logger.info('[SessionID:%d] HumanPlayer created' % sessionid)
audio_sender = pc.addTrack(player.audio)
logger.info('[SessionID:%d] Audio track added' % sessionid)
video_sender = pc.addTrack(player.video)
logger.info('[SessionID:%d] Video track added' % sessionid)
    # Record when audio/video track initialization completes
track_init_end = time.time()
track_duration = track_init_end - track_init_start
logger.info('[SessionID:%d] Audio/video tracks initialized in %.3f seconds' % (sessionid, track_duration))
    # Record when codec configuration starts
codec_start = time.time()
logger.info('[SessionID:%d] Configuring video codecs' % sessionid)
capabilities = RTCRtpSender.getCapabilities("video")
preferences = list(filter(lambda x: x.name == "H264", capabilities.codecs))
preferences += list(filter(lambda x: x.name == "VP8", capabilities.codecs))
preferences += list(filter(lambda x: x.name == "rtx", capabilities.codecs))
logger.info('[SessionID:%d] Available codecs: %s' % (sessionid, [codec.name for codec in preferences]))
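    # The audio track was added first, so index 1 is the video transceiver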
transceiver = pc.getTransceivers()[1]
transceiver.setCodecPreferences(preferences)
    # Record when codec configuration completes
codec_end = time.time()
codec_duration = codec_end - codec_start
logger.info('[SessionID:%d] Video codecs configured in %.3f seconds' % (sessionid, codec_duration))
    # Record when SDP negotiation starts
    sdp_start = time.time()
logger.info('[SessionID:%d] Starting SDP negotiation' % sessionid)
await pc.setRemoteDescription(offer)
logger.info('[SessionID:%d] Remote description set' % sessionid)
answer = await pc.createAnswer()
logger.info('[SessionID:%d] Answer created' % sessionid)
await pc.setLocalDescription(answer)
    # Record when SDP negotiation completes
sdp_end = time.time()
sdp_duration = sdp_end - sdp_start
logger.info('[SessionID:%d] SDP negotiation completed in %.3f seconds' % (sessionid, sdp_duration))
#return jsonify({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type})
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type, "sessionid":sessionid}
),
)
async def human(request):
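    """Accept a text message for a session: 'echo' speaks the text verbatim,
    'chat' routes it through the LLM; all results are pushed over WebSocket."""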
try:
params = await request.json()
sessionid = params.get('sessionid',0)
user_message = params.get('text', '')
message_type = params.get('type', 'echo')
        # Detect the request source (via User-Agent or a custom header)
        user_agent = request.headers.get('User-Agent', '')
        request_source = "third-party service" if 'python' in user_agent.lower() or 'curl' in user_agent.lower() or 'postman' in user_agent.lower() else "web page"
        # Prefer an explicit source header when present
        if 'X-Request-Source' in request.headers:
            request_source = request.headers['X-Request-Source']
if params.get('interrupt'):
nerfreals[sessionid].flush_talk()
        # Push the user message over WebSocket (all user input is broadcast)
        await broadcast_message_to_session(sessionid, message_type, user_message, "User", None, request_source)
ai_response = None
model_info = None
if message_type == 'echo':
nerfreals[sessionid].put_msg_txt(user_message)
ai_response = user_message
model_info = "Echo模式"
# 推送回音消息到WebSocket
await broadcast_message_to_session(sessionid, 'echo', user_message, "回音", model_info, request_source)
elif message_type == 'chat':
            # Look up which LLM is currently in use
            model_info = getattr(nerfreals[sessionid], 'llm_model_name', 'Unknown LLM')
            if hasattr(nerfreals[sessionid], 'llm') and hasattr(nerfreals[sessionid].llm, 'model_name'):
                model_info = nerfreals[sessionid].llm.model_name
            ai_response = await asyncio.get_event_loop().run_in_executor(None, llm_response, user_message, nerfreals[sessionid])
            # Push the AI reply over WebSocket (including the model name)
            await broadcast_message_to_session(sessionid, 'chat', ai_response, "AI assistant", model_info, request_source)
            # The avatar reply is delivered through another path, so this stays commented out:
            #nerfreals[sessionid].put_msg_txt(ai_response)
        # Return only a simple processing status; all payload data is pushed via WebSocket
return web.Response(
content_type="application/json",
text=json.dumps({
"code": 0,
"message": "消息已处理并推送"
}),
)
except Exception as e:
error_msg = str(e)
logger.exception('exception:')
        # Push the error message over WebSocket
        try:
            sessionid = params.get('sessionid', 0) if 'params' in locals() else 0
            request_source = "web page"  # default source
            await broadcast_message_to_session(sessionid, 'error', f"Error while processing message: {error_msg}", "System error", "Error", request_source)
        except Exception:
            pass  # If the WebSocket push also fails, don't break the HTTP response
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": error_msg, "error_details": error_msg}
),
)
async def interrupt_talk(request):
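    """Interrupt the avatar's current speech for the given session."""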
try:
params = await request.json()
sessionid = params.get('sessionid',0)
nerfreals[sessionid].flush_talk()
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "msg":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
from pydub import AudioSegment
from io import BytesIO
async def ensure_asr_connection(sessionid: int) -> bool:
"""确保ASR连接可用"""
# 通过迁移实例获取ASR连接
migration = get_app_websocket_migration()
if sessionid not in migration.asr_connections:
return await create_asr_connection(sessionid)
asr_client = migration.asr_connections[sessionid]
# 检查连接状态
if not asr_client.is_connected():
logger.warning(f"[SessionID:{sessionid}] ASR连接已断开,尝试重连")
try:
# 重新连接
success = await asyncio.get_event_loop().run_in_executor(
None, asr_client.connect
)
if success:
logger.info(f"[SessionID:{sessionid}] ASR重连成功")
return True
else:
logger.error(f"[SessionID:{sessionid}] ASR重连失败")
# 清理失效连接
migration = get_app_websocket_migration()
if sessionid in migration.asr_connections:
del migration.asr_connections[sessionid]
return False
except Exception as e:
logger.error(f"[SessionID:{sessionid}] ASR重连异常: {e}")
del asr_connections[sessionid]
return False
return True
async def create_asr_connection(sessionid: int) -> bool:
"""创建新的ASR连接"""
try:
from funasr_asr_sync import FunASRSync
username = f'User_{sessionid}' # 修复大小写不一致:user_ -> User_
asr_client = FunASRSync(username)
# 设置结果回调
def on_asr_result(result):
if isinstance(result, str):
result_data = {
'text': result,
'is_final': True,
'confidence': 1.0
}
else:
result_data = result
# 线程安全地调度异步任务
try:
# 优先使用全局事件循环引用
if main_event_loop is not None and not main_event_loop.is_closed():
# 使用全局事件循环进行跨线程调用
asyncio.run_coroutine_threadsafe(
# send_asr_result(sessionid, result_data), main_event_loop
send_normal_asr_result(sessionid, result_data), main_event_loop
)
logger.debug(f"[SessionID:{sessionid}] 使用全局事件循环发送ASR结果")
else:
# 降级处理:尝试获取当前线程的事件循环
try:
loop = asyncio.get_event_loop()
if loop.is_running():
loop.call_soon_threadsafe(
lambda: asyncio.create_task(send_normal_asr_result(sessionid, result_data))
)
else:
asyncio.create_task(send_normal_asr_result(sessionid, result_data))
except RuntimeError:
# 最终降级:仅记录日志
logger.info(f"[SessionID:{sessionid}] ASR识别结果: {result_data.get('text', 'N/A')}")
logger.warning(f"[SessionID:{sessionid}] 无法发送ASR结果到客户端,事件循环不可用")
except Exception as e:
logger.error(f"[SessionID:{sessionid}] ASR结果处理异常: {e}")
# 至少记录识别结果
logger.info(f"[SessionID:{sessionid}] ASR识别结果: {result_data.get('text', 'N/A')}")
asr_client.set_result_callback(on_asr_result)
# 异步连接
success = await asyncio.get_event_loop().run_in_executor(
None, asr_client.connect
)
if success:
# 通过迁移实例存储ASR连接
migration = get_app_websocket_migration()
migration.asr_connections[sessionid] = asr_client
logger.info(f"[SessionID:{sessionid}] ASR连接创建成功")
return True
else:
logger.error(f"[SessionID:{sessionid}] ASR连接创建失败")
return False
except Exception as e:
logger.error(f"[SessionID:{sessionid}] 创建ASR连接异常: {e}")
return False
async def humanaudio(request):
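    """Receive an uploaded audio file (multipart FormData), feed it to the
    avatar for playback, and optionally forward it to an ASR service."""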
    try:
        # Parse the multipart FormData (file upload)
        reader = await request.multipart()
        sessionid = 0
        fileobj = None
        filename = None
        filebytes = None
        # Default to the local FunASR service
        asr_service = "funasr"
        # Read the FormData fields
        async for field in reader:
            if field.name == 'sessionid':
                sessionid = int(await field.text())
                logger.info(f'Parsed sessionid: {sessionid}')
            elif field.name == 'audio':
                fileobj = field
                filename = field.filename
                filebytes = await field.read()
                logger.info(f'Audio file content size: {len(filebytes)} bytes')
            elif field.name == 'asr_service':
                asr_service = (await field.text()).strip().lower()
        if not fileobj:
            return web.Response(
                content_type="application/json",
                text=json.dumps({"code": -1, "msg": "No audio file provided"})
            )
        # Treat the upload as MP3 based on the file extension
        is_mp3 = filename.lower().endswith('.mp3') if filename else False
        # Convert MP3 to WAV
if is_mp3:
try:
with BytesIO(filebytes) as audio_buffer:
audio = AudioSegment.from_file(audio_buffer, format="mp3")
out_io = BytesIO()
audio.export(out_io, format="wav")
filebytes = out_io.getvalue()
except Exception as e:
logger.error(f"[SessionID:{sessionid}] 音频处理失败: {e}")
raise
        # Get the WebSocket migration instance to inspect connection state
        migration = get_app_websocket_migration()
        active_sessions = migration.get_websocket_connections()
        logger.info(f'[SessionID:{sessionid}] Received audio request; current connection pool: {list(active_sessions.keys())}')
        # Verify that the sessionid exists
if sessionid not in nerfreals:
return web.Response(
content_type="application/json",
text=json.dumps({"code": -1, "msg": f"Session {sessionid} not found. Please establish WebRTC connection first."})
)
        # Send the audio to the avatar for playback
        nerfreals[sessionid].put_audio_file(filebytes)
        # ---------- ASR dispatch ----------
        if asr_service == 'funasr':
            await handle_funasr(sessionid, filebytes)
        elif asr_service == 'doubao':
            await handle_doubao(sessionid, filebytes)
        else:
            logger.warning(f'[SessionID:{sessionid}] Unknown or unspecified asr_service; skipping ASR')
        # Check ASR connection status through the migration instance
        asr_enabled = sessionid in migration.asr_connections
return web.Response(
content_type="application/json",
text=json.dumps({"code": 0, "msg": "ok", "asr_enabled": asr_enabled})
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps( {"code": -1, "msg": str(e)})
)
async def handle_funasr(sessionid: int, audio_bytes: bytes):
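    """Forward the uploaded audio to the local FunASR service for recognition."""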
    # ASR recognition using the new connection-management scheme
    try:
        # Make sure an ASR connection is available
        asr_available = await ensure_asr_connection(sessionid)
        if asr_available:
            # Send the audio to the ASR service through the migration instance
            migration = get_app_websocket_migration()
            asr_client = migration.asr_connections[sessionid]
            if hasattr(asr_client, 'send_audio_data'):
                asr_client.send_audio_data(audio_bytes)
                logger.info(f'[SessionID:{sessionid}] Audio sent to the ASR service for recognition')
            else:
                logger.warning(f'[SessionID:{sessionid}] ASR client does not support send_audio_data')
        else:
            logger.warning(f'[SessionID:{sessionid}] ASR connection unavailable; skipping speech recognition')
    except Exception as asr_error:
        logger.error(f'[SessionID:{sessionid}] ASR processing error: {asr_error}')
        # ASR errors must not break the main flow; continue and report success
# Import the Doubao ASR service
from asr.doubao.service_factory import recognize_audio_data
import os
async def handle_doubao(sessionid: int, audio_bytes: bytes):
"""云端 Doubao 调用"""
try:
logger.info(f"[SessionID:{sessionid}] 使用云端 Doubao 识别")
# 读取豆包ASR配置文件
config_path = os.path.join(os.path.dirname(__file__), 'asr', 'doubao', 'config.json')
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
        # Read the auth settings
auth_config = config.get('auth_config', {})
app_key = auth_config.get('app_key')
access_key = auth_config.get('access_key')
if not app_key or not access_key:
raise ValueError("豆包ASR认证配置缺失:app_key 或 access_key 未配置")
result = await recognize_audio_data(
audio_data=audio_bytes,
app_key=app_key,
access_key=access_key,
streaming=True,
            result_callback=lambda res: logger.info(f"[SessionID:{sessionid}] Doubao recognition result: {res}")
)
return result
except Exception as e:
logger.error(f"[SessionID:{sessionid}] Doubao 错误: {e}")
raise
async def set_audiotype(request):
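    """Switch the avatar's custom audio state, optionally reinitializing it."""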
try:
params = await request.json()
sessionid = params.get('sessionid',0)
nerfreals[sessionid].set_custom_state(params['audiotype'],params['reinit'])
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
async def record(request):
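    """Start or stop server-side recording for the session."""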
try:
params = await request.json()
sessionid = params.get('sessionid',0)
if params['type']=='start_record':
# nerfreals[sessionid].put_msg_txt(params['text'])
nerfreals[sessionid].start_recording()
elif params['type']=='end_record':
nerfreals[sessionid].stop_recording()
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
async def is_speaking(request):
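    """Report whether the session's avatar is currently speaking."""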
params = await request.json()
sessionid = params.get('sessionid',0)
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data": nerfreals[sessionid].is_speaking()}
),
)
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
async def post(url,data):
try:
async with aiohttp.ClientSession() as session:
async with session.post(url,data=data) as response:
return await response.text()
except aiohttp.ClientError as e:
        logger.error(f'Error: {e}')
async def run(push_url,sessionid):
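    """Build a session and push its WebRTC stream to an RTC server (rtcpush transport)."""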
nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal,sessionid)
nerfreals[sessionid] = nerfreal
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
logger.info("Connection state is %s" % pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
player = HumanPlayer(nerfreals[sessionid])
audio_sender = pc.addTrack(player.audio)
video_sender = pc.addTrack(player.video)
await pc.setLocalDescription(await pc.createOffer())
answer = await post(push_url,pc.localDescription.sdp)
await pc.setRemoteDescription(RTCSessionDescription(sdp=answer,type='answer'))
##########################################
# os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# os.environ['MULTIPROCESSING_METHOD'] = 'forkserver'
if __name__ == '__main__':
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument('--pose', type=str, default="data/data_kf.json", help="transforms.json, pose source")
parser.add_argument('--au', type=str, default="data/au.csv", help="eye blink area")
parser.add_argument('--torso_imgs', type=str, default="", help="torso images path")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye")
parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use")
parser.add_argument('--workspace', type=str, default='data/video')
parser.add_argument('--seed', type=int, default=0)
### training options
parser.add_argument('--ckpt', type=str, default='data/pretrained/ngp_kf.pth')
parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
### loss set
parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps")
parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss")
parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss")
parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss")
parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss")
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--bg_img', type=str, default='white', help="background image")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.")
# (the default value is for the fox dataset)
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--init_lips', action='store_true', help="init lips region")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
#parser.add_argument('--asr_model', type=str, default='deepspeech')
parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') #
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
# parser.add_argument('--asr_model', type=str, default='facebook/hubert-large-ls960-ft')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=8)
parser.add_argument('-r', type=int, default=10)
parser.add_argument('--fullbody', action='store_true', help="fullbody human")
parser.add_argument('--fullbody_img', type=str, default='data/fullbody/img')
parser.add_argument('--fullbody_width', type=int, default=580)
parser.add_argument('--fullbody_height', type=int, default=1080)
parser.add_argument('--fullbody_offset_x', type=int, default=0)
parser.add_argument('--fullbody_offset_y', type=int, default=0)
#musetalk opt
parser.add_argument('--avatar_id', type=str, default='avator_1')
parser.add_argument('--bbox_shift', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=16)
# parser.add_argument('--customvideo', action='store_true', help="custom video")
# parser.add_argument('--customvideo_img', type=str, default='data/customvideo/img')
# parser.add_argument('--customvideo_imgnum', type=int, default=1)
parser.add_argument('--customvideo_config', type=str, default='')
parser.add_argument('--tts', type=str, default='edgetts') #xtts gpt-sovits cosyvoice
parser.add_argument('--REF_FILE', type=str, default=None)
parser.add_argument('--REF_TEXT', type=str, default=None)
parser.add_argument('--TTS_SERVER', type=str, default='http://127.0.0.1:9880') # http://localhost:9000
# parser.add_argument('--CHARACTER', type=str, default='test')
# parser.add_argument('--EMOTION', type=str, default='default')
parser.add_argument('--model', type=str, default='ernerf') #musetalk wav2lip
    parser.add_argument('--gpu', type=int, default=0, help="GPU index to use, e.g. 0 for the first GPU, 1 for the second")
parser.add_argument('--transport', type=str, default='rtcpush') #rtmp webrtc rtcpush
parser.add_argument('--push_url', type=str, default='http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream') #rtmp://localhost/live/livestream
parser.add_argument('--max_session', type=int, default=1) #multi session count
parser.add_argument('--listenport', type=int, default=8010)
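    # Example launches (illustrative; the model weights and avatar data must exist locally):
    #   python app.py --model wav2lip --avatar_id avator_1 --transport webrtc
    #   python app.py --model musetalk --transport rtcpush --push_url http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream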
opt = parser.parse_args()
#app.config.from_object(opt)
#print(app.config)
opt.customopt = []
if opt.customvideo_config!='':
with open(opt.customvideo_config,'r') as file:
opt.customopt = json.load(file)
if opt.model == 'ernerf':
from nerfreal import NeRFReal,load_model,load_avatar
model = load_model(opt)
avatar = load_avatar(opt)
# we still need test_loader to provide audio features for testing.
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = NeRFReal(opt, trainer, test_loader,audio_processor,audio_model)
# nerfreals.append(nerfreal)
elif opt.model == 'musetalk':
from musereal import MuseReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model()
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,model)
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = MuseReal(opt,audio_processor,vae, unet, pe,timesteps)
# nerfreals.append(nerfreal)
elif opt.model == 'wav2lip':
from lipreal import LipReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model("./models/wav2lip.pth", opt.gpu)
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,model,256)
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = LipReal(opt,model)
# nerfreals.append(nerfreal)
elif opt.model == 'ultralight':
from lightreal import LightReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model(opt)
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,avatar,160)
if opt.transport=='rtmp':
thread_quit = Event()
nerfreals[0] = build_nerfreal(0)
rendthrd = Thread(target=nerfreals[0].render,args=(thread_quit,))
rendthrd.start()
    #############################################################################
    # The ASR handlers have been migrated to the unified architecture and are
    # provided by core.app_websocket_migration.
    #############################################################################
appasync = web.Application()
appasync.on_shutdown.append(on_shutdown)
appasync.router.add_post("/offer", offer)
appasync.router.add_post("/human", human)
appasync.router.add_post("/humanaudio", humanaudio)
appasync.router.add_post("/set_audiotype", set_audiotype)
appasync.router.add_post("/record", record)
appasync.router.add_post("/interrupt_talk", interrupt_talk)
appasync.router.add_post("/is_speaking", is_speaking)
    # Initialize the unified WebSocket management architecture
    websocket_migration = get_app_websocket_migration()
    # Register the WebSocket routes using the new unified architecture
    setup_app_websocket_routes(appasync)
    # Async initialization runs at server startup
    async def init_websocket_migration():
        await initialize_app_websocket_migration()
        logger.info("WebSocket migration architecture initialized")
    # Register the startup hook
    appasync.on_startup.append(lambda app: init_websocket_migration())
appasync.router.add_static('/',path='web')
    # The server-side recording WebSocket endpoints are folded into the unified
    # architecture: access them via the /ws route using message types such as
    # wsa_register_web and wsa_register_human.
    logger.info("Main application routes configured; WebSocket endpoints unified under /ws")
# Configure default CORS settings.
cors = aiohttp_cors.setup(appasync, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
# Configure CORS on all routes.
for route in list(appasync.router.routes()):
cors.add(route)
pagename='webrtcapi.html'
if opt.transport=='rtmp':
pagename='echoapi.html'
elif opt.transport=='rtcpush':
pagename='rtcpushapi.html'
logger.info('start http server; http://<serverip>:'+str(opt.listenport)+'/'+pagename)
    logger.info('For webrtc, the integrated frontend is recommended: http://<serverip>:'+str(opt.listenport)+'/dashboard.html')
def run_server(runner):
global main_event_loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
        # Set the global event-loop reference used for cross-thread async calls
        main_event_loop = loop
        logger.info("Global event-loop reference set")
loop.run_until_complete(runner.setup())
site = web.TCPSite(runner, '0.0.0.0', opt.listenport)
loop.run_until_complete(site.start())
if opt.transport=='rtcpush':
for k in range(opt.max_session):
push_url = opt.push_url
if k!=0:
push_url = opt.push_url+str(k)
loop.run_until_complete(run(push_url,k))
loop.run_forever()
#Thread(target=run_server, args=(web.AppRunner(appasync),)).start()
run_server(web.AppRunner(appasync))
#app.on_shutdown.append(on_shutdown)
#app.router.add_post("/offer", offer)
# print('start websocket server')
# server = pywsgi.WSGIServer(('0.0.0.0', 8000), app, handler_class=WebSocketHandler)
# server.serve_forever()