###############################################################################
# Copyright (C) 2024 LiveTalking@lipku https://github.com/lipku/LiveTalking
# email: lipku@foxmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# app.py
from flask import Flask, render_template, send_from_directory, request, jsonify
#from flask_sockets import Sockets  # unused: the Sockets(app) setup below is commented out
import base64
import json
#import gevent
#from gevent import pywsgi
#from geventwebsocket.handler import WebSocketHandler
import re
import numpy as np
from threading import Thread,Event
#import multiprocessing
import torch.multiprocessing as mp
from aiohttp import web, WSMsgType
import aiohttp
import aiohttp_cors
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.rtcrtpsender import RTCRtpSender
from webrtc import HumanPlayer
from basereal import BaseReal
from llm import llm_response
import argparse
import random
import shutil
import asyncio
import torch
from typing import Dict
from logger import logger
import gc
import weakref
import time
app = Flask(__name__)
#sockets = Sockets(app)
nerfreals:Dict[int, BaseReal] = {} #sessionid:BaseReal
websocket_connections:Dict[int, weakref.WeakSet] = {} #sessionid:websocket_connections
opt = None
model = None
avatar = None
#####webrtc###############################
pcs = set()
# WebSocket message broadcast helper
async def broadcast_message_to_session(sessionid: int, message_type: str, content: str, source: str = "digital human reply", model_info: str = None, request_source: str = "web page"):
    """Push a message to every WebSocket connection registered for the given session."""
    logger.info(f'[SessionID:{sessionid}] Broadcasting message: {message_type}, source: {source}, content: {content[:50]}...')
    logger.info(f'[SessionID:{sessionid}] Current websocket_connections keys: {list(websocket_connections.keys())}')
    if sessionid not in websocket_connections:
        logger.warning(f'[SessionID:{sessionid}] Session not found in websocket_connections')
        return
    logger.info(f'[SessionID:{sessionid}] Session found, connection count: {len(websocket_connections[sessionid])}')
    message = {
        "type": "chat_message",
        "data": {
            "sessionid": sessionid,
            "message_type": message_type,
            "content": content,
            "source": source,
            "model_info": model_info,
            "request_source": request_source,
            "timestamp": time.time()
        }
    }
    # Snapshot the session's connections (the WeakSet may mutate while we iterate)
    connections = list(websocket_connections[sessionid])
    logger.info(f'[SessionID:{sessionid}] Sending message to {len(connections)} connection(s)')
    for i, ws in enumerate(connections):
        try:
            logger.info(f'[SessionID:{sessionid}] Checking connection {i+1}: closed={ws.closed}')
            if not ws.closed:
                await ws.send_str(json.dumps(message))
                logger.info(f'[SessionID:{sessionid}] Connection {i+1} delivered: {message_type} from {request_source}')
            else:
                logger.warning(f'[SessionID:{sessionid}] Connection {i+1} already closed, skipping')
        except Exception as e:
            logger.error(f'[SessionID:{sessionid}] Connection {i+1} send failed: {e}')
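# For reference, the envelope delivered to /ws clients looks like this
# (a sketch assembled from the dict above; field values are illustrative):
#
#   {
#     "type": "chat_message",
#     "data": {
#       "sessionid": 123456,
#       "message_type": "chat",
#       "content": "...",
#       "source": "AI assistant",
#       "model_info": "Echo mode",
#       "request_source": "web page",
#       "timestamp": 1700000000.0
#     }
#   }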
# WebSocket handler
async def websocket_handler(request):
    """Handle an incoming WebSocket connection."""
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    sessionid = None
    logger.info('New WebSocket connection established')
    try:
        async for msg in ws:
            if msg.type == WSMsgType.TEXT:
                try:
                    data = json.loads(msg.data)
                    if data.get('type') == 'login':
                        sessionid = data.get('sessionid', 0)
                        logger.info(f'[SessionID:{sessionid}] Login request received, current connection pool: {list(websocket_connections.keys())}')
                        # Initialize the connection set for this session
                        if sessionid not in websocket_connections:
                            websocket_connections[sessionid] = weakref.WeakSet()
                            logger.info(f'[SessionID:{sessionid}] Created new connection set')
                        # Register the current connection with the session
                        websocket_connections[sessionid].add(ws)
                        logger.info(f'[SessionID:{sessionid}] Connection added, session connection count: {len(websocket_connections[sessionid])}')
                        logger.info(f'[SessionID:{sessionid}] WebSocket client logged in')
                        # Acknowledge the login
                        await ws.send_str(json.dumps({
                            "type": "login_success",
                            "sessionid": sessionid,
                            "message": "WebSocket connected"
                        }))
                    elif data.get('type') == 'ping':
                        # Heartbeat
                        await ws.send_str(json.dumps({"type": "pong"}))
                except json.JSONDecodeError:
                    logger.error('Invalid JSON received from WebSocket')
                except Exception as e:
                    logger.error(f'Error processing WebSocket message: {e}')
            elif msg.type == WSMsgType.ERROR:
                logger.error(f'WebSocket error: {ws.exception()}')
                break
    except Exception as e:
        logger.error(f'WebSocket connection error: {e}')
    finally:
        if sessionid is not None:
            logger.info(f'[SessionID:{sessionid}] WebSocket connection closed')
        else:
            logger.info('WebSocket connection closed')
    return ws
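# Minimal client sketch for the handler above (assumes the server runs on
# localhost with the default --listenport 8010; aiohttp is already a dependency):
#
#   import asyncio, json
#   import aiohttp
#
#   async def demo():
#       async with aiohttp.ClientSession() as session:
#           async with session.ws_connect('http://localhost:8010/ws') as ws:
#               await ws.send_str(json.dumps({'type': 'login', 'sessionid': 123456}))
#               print(await ws.receive_str())   # -> {"type": "login_success", ...}
#               await ws.send_str(json.dumps({'type': 'ping'}))
#               print(await ws.receive_str())   # -> {"type": "pong"}
#
#   asyncio.run(demo())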
def randN(N) -> int:
    '''Generate a random N-digit integer.'''
    low = pow(10, N - 1)
    high = pow(10, N)
    return random.randint(low, high - 1)
def build_nerfreal(sessionid: int) -> BaseReal:
    opt.sessionid = sessionid
    logger.info('[SessionID:%d] Building %s model instance' % (sessionid, opt.model))
    try:
        model_start = time.time()
        if opt.model == 'wav2lip':
            logger.info('[SessionID:%d] Loading Wav2Lip model...' % sessionid)
            from lipreal import LipReal
            nerfreal = LipReal(opt, model, avatar)
        elif opt.model == 'musetalk':
            logger.info('[SessionID:%d] Loading MuseTalk model...' % sessionid)
            from musereal import MuseReal
            nerfreal = MuseReal(opt, model, avatar)
        elif opt.model == 'ernerf':
            logger.info('[SessionID:%d] Loading ERNeRF model...' % sessionid)
            from nerfreal import NeRFReal
            nerfreal = NeRFReal(opt, model, avatar)
        elif opt.model == 'ultralight':
            logger.info('[SessionID:%d] Loading UltraLight model...' % sessionid)
            from lightreal import LightReal
            nerfreal = LightReal(opt, model, avatar)
        else:
            raise ValueError(f"Unknown model type: {opt.model}")
        model_duration = time.time() - model_start
        logger.info('[SessionID:%d] %s model loaded successfully in %.3f seconds' % (sessionid, opt.model, model_duration))
        # Force garbage collection to release load-time memory
        gc.collect()
        return nerfreal
    except Exception:
        logger.exception('[SessionID:%d] Failed to build %s model' % (sessionid, opt.model))
        # Clean up any partially initialized resources
        gc.collect()
        raise
#@app.route('/offer', methods=['POST'])
async def offer(request):
params = await request.json()
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
if len(nerfreals) >= opt.max_session:
logger.info('reach max session')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": "reach max session"}
),
)
    sessionid = randN(6)  #len(nerfreals)
    logger.info('[SessionID:%d] Starting session initialization', sessionid)
    nerfreals[sessionid] = None  # reserve the slot so concurrent offers respect max_session
    # Time the model initialization
    model_init_start = time.time()
    logger.info('[SessionID:%d] Starting model initialization for %s' % (sessionid, opt.model))
    try:
        nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal, sessionid)
    except Exception:
        del nerfreals[sessionid]  # free the reserved slot if initialization failed
        raise
    init_duration = time.time() - model_init_start
    logger.info('[SessionID:%d] Model initialization completed in %.3f seconds' % (sessionid, init_duration))
    nerfreals[sessionid] = nerfreal
pc = RTCPeerConnection()
pcs.add(pc)
    # Monitor ICE connection state
    @pc.on("iceconnectionstatechange")
    async def on_iceconnectionstatechange():
        timestamp = time.time()
        logger.info("[SessionID:%d] ICE connection state changed to %s at %.3f" % (sessionid, pc.iceConnectionState, timestamp))
if pc.iceConnectionState == "checking":
logger.info("[SessionID:%d] ICE connectivity checks in progress..." % sessionid)
elif pc.iceConnectionState == "connected":
logger.info("[SessionID:%d] ICE connection established" % sessionid)
elif pc.iceConnectionState == "completed":
logger.info("[SessionID:%d] ICE connection completed" % sessionid)
elif pc.iceConnectionState == "failed":
logger.error("[SessionID:%d] ICE connection failed" % sessionid)
elif pc.iceConnectionState == "disconnected":
logger.warning("[SessionID:%d] ICE connection disconnected" % sessionid)
    # Monitor ICE candidate gathering state
    @pc.on("icegatheringstatechange")
    async def on_icegatheringstatechange():
        timestamp = time.time()
        logger.info("[SessionID:%d] ICE gathering state changed to %s at %.3f" % (sessionid, pc.iceGatheringState, timestamp))
if pc.iceGatheringState == "gathering":
logger.info("[SessionID:%d] ICE candidates gathering..." % sessionid)
elif pc.iceGatheringState == "complete":
logger.info("[SessionID:%d] ICE candidates gathering completed" % sessionid)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
        timestamp = time.time()
logger.info("[SessionID:%d] Connection state changed to %s at %.3f" % (sessionid, pc.connectionState, timestamp))
if pc.connectionState == "connecting":
logger.info("[SessionID:%d] WebRTC connection establishing..." % sessionid)
elif pc.connectionState == "connected":
logger.info("[SessionID:%d] WebRTC connection established successfully" % sessionid)
elif pc.connectionState == "failed":
logger.error("[SessionID:%d] WebRTC connection failed" % sessionid)
await pc.close()
pcs.discard(pc)
if sessionid in nerfreals:
del nerfreals[sessionid]
elif pc.connectionState == "closed":
logger.info("[SessionID:%d] WebRTC connection closed" % sessionid)
pcs.discard(pc)
if sessionid in nerfreals:
del nerfreals[sessionid]
gc.collect()
    # Time the audio/video track initialization
track_init_start = time.time()
logger.info('[SessionID:%d] Initializing audio/video tracks' % sessionid)
player = HumanPlayer(nerfreals[sessionid])
logger.info('[SessionID:%d] HumanPlayer created' % sessionid)
audio_sender = pc.addTrack(player.audio)
logger.info('[SessionID:%d] Audio track added' % sessionid)
video_sender = pc.addTrack(player.video)
logger.info('[SessionID:%d] Video track added' % sessionid)
    # Audio/video track initialization finished
track_init_end = time.time()
track_duration = track_init_end - track_init_start
logger.info('[SessionID:%d] Audio/video tracks initialized in %.3f seconds' % (sessionid, track_duration))
    # Time the video codec configuration
codec_start = time.time()
logger.info('[SessionID:%d] Configuring video codecs' % sessionid)
capabilities = RTCRtpSender.getCapabilities("video")
preferences = list(filter(lambda x: x.name == "H264", capabilities.codecs))
preferences += list(filter(lambda x: x.name == "VP8", capabilities.codecs))
preferences += list(filter(lambda x: x.name == "rtx", capabilities.codecs))
logger.info('[SessionID:%d] Available codecs: %s' % (sessionid, [codec.name for codec in preferences]))
    # Transceiver 0 is audio (added first above); transceiver 1 carries the video track
    transceiver = pc.getTransceivers()[1]
transceiver.setCodecPreferences(preferences)
    # Codec configuration finished
codec_end = time.time()
codec_duration = codec_end - codec_start
logger.info('[SessionID:%d] Video codecs configured in %.3f seconds' % (sessionid, codec_duration))
    # Time the SDP negotiation
    sdp_start = time.time()
logger.info('[SessionID:%d] Starting SDP negotiation' % sessionid)
await pc.setRemoteDescription(offer)
logger.info('[SessionID:%d] Remote description set' % sessionid)
answer = await pc.createAnswer()
logger.info('[SessionID:%d] Answer created' % sessionid)
await pc.setLocalDescription(answer)
    # SDP negotiation finished
sdp_end = time.time()
sdp_duration = sdp_end - sdp_start
logger.info('[SessionID:%d] SDP negotiation completed in %.3f seconds' % (sessionid, sdp_duration))
#return jsonify({"sdp": pc.localDescription.sdp, "type": pc.localDescription.type})
return web.Response(
content_type="application/json",
text=json.dumps(
{"sdp": pc.localDescription.sdp, "type": pc.localDescription.type, "sessionid":sessionid}
),
)
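# Request/response shapes for /offer (taken from the handler above; the
# sessionid value is illustrative):
#   POST /offer        {"sdp": "<browser offer sdp>", "type": "offer"}
#   200 on success     {"sdp": "<answer sdp>", "type": "answer", "sessionid": 123456}
#   200 on overflow    {"code": -1, "msg": "reach max session"}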
async def human(request):
    try:
        params = await request.json()
        sessionid = params.get('sessionid', 0)
        user_message = params.get('text', '')
        message_type = params.get('type', 'echo')
        # Detect the request origin (via User-Agent or a custom header)
        user_agent = request.headers.get('User-Agent', '')
        request_source = "third-party service" if 'python' in user_agent.lower() or 'curl' in user_agent.lower() or 'postman' in user_agent.lower() else "web page"
        # A custom origin header, if present, takes precedence
        if 'X-Request-Source' in request.headers:
            request_source = request.headers['X-Request-Source']
        if params.get('interrupt'):
            nerfreals[sessionid].flush_talk()
        # Push the user message over WebSocket (all user input is broadcast uniformly)
        await broadcast_message_to_session(sessionid, message_type, user_message, "user", None, request_source)
        ai_response = None
        model_info = None
        if message_type == 'echo':
            nerfreals[sessionid].put_msg_txt(user_message)
            ai_response = user_message
            model_info = "Echo mode"
            # Push the echoed message over WebSocket
            await broadcast_message_to_session(sessionid, 'echo', user_message, "echo", model_info, request_source)
        elif message_type == 'chat':
            # Look up which LLM is currently in use
            model_info = getattr(nerfreals[sessionid], 'llm_model_name', 'Unknown LLM')
            if hasattr(nerfreals[sessionid], 'llm') and hasattr(nerfreals[sessionid].llm, 'model_name'):
                model_info = nerfreals[sessionid].llm.model_name
            ai_response = await asyncio.get_event_loop().run_in_executor(None, llm_response, user_message, nerfreals[sessionid])
            # Push the AI reply over WebSocket (including the LLM model info)
            await broadcast_message_to_session(sessionid, 'chat', ai_response, "AI assistant", model_info, request_source)
            # Intentionally left commented: the avatar reply is fed through another path
            #nerfreals[sessionid].put_msg_txt(ai_response)
        # Return only a simple status; all payload data is pushed over WebSocket
        return web.Response(
            content_type="application/json",
            text=json.dumps({
                "code": 0,
                "message": "message processed and pushed"
            }),
        )
    except Exception as e:
        error_msg = str(e)
        logger.exception('exception:')
        # Push the error over WebSocket as well
        try:
            sessionid = params.get('sessionid', 0) if 'params' in locals() else 0
            request_source = "web page"  # default origin
            await broadcast_message_to_session(sessionid, 'error', f"Error while processing message: {error_msg}", "system error", "Error", request_source)
        except Exception:
            pass  # a failed WebSocket push must not break the HTTP response
        return web.Response(
            content_type="application/json",
            text=json.dumps(
                {"code": -1, "msg": error_msg, "error_details": error_msg}
            ),
        )
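# Example call (assumes the default --listenport 8010; the optional
# X-Request-Source header overrides the User-Agent based origin detection):
#
#   curl -X POST http://localhost:8010/human \
#        -H 'Content-Type: application/json' \
#        -H 'X-Request-Source: third-party service' \
#        -d '{"sessionid": 123456, "type": "chat", "text": "hello", "interrupt": true}'
#
# The HTTP response only acknowledges receipt; the user message and the AI
# reply are pushed to the session's /ws connections via broadcast_message_to_session.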
async def interrupt_talk(request):
try:
params = await request.json()
sessionid = params.get('sessionid',0)
nerfreals[sessionid].flush_talk()
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "msg":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
from pydub import AudioSegment
from io import BytesIO
async def humanaudio(request):
try:
params = await request.json()
sessionid = int(params.get('sessionid', 0))
fileobj = params.get('file_url')
        # Fetch the audio payload
        if isinstance(fileobj, str) and fileobj.startswith("http"):
            async with aiohttp.ClientSession() as session:
                async with session.get(fileobj) as response:
                    if response.status == 200:
                        filebytes = await response.read()
                    else:
                        return web.Response(
                            content_type="application/json",
                            text=json.dumps({"code": -1, "msg": "Error downloading file"})
                        )
            # Decide from the URL suffix whether the file is MP3
            is_mp3 = fileobj.lower().endswith('.mp3')
        else:
            # Note: this branch expects an uploaded file object exposing
            # .filename and .file, which a JSON body cannot carry; it only
            # applies if the handler is fed multipart form data.
            filename = fileobj.filename
            filebytes = fileobj.file.read()
            is_mp3 = filename.lower().endswith('.mp3')
if is_mp3:
audio = AudioSegment.from_file(BytesIO(filebytes), format="mp3")
out_io = BytesIO()
audio.export(out_io, format="wav")
filebytes = out_io.getvalue()
nerfreals[sessionid].put_audio_file(filebytes)
return web.Response(
content_type="application/json",
text=json.dumps({"code": 0, "msg": "ok"})
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps( {"code": -1, "msg": str(e)})
)
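# Example call with a remote file (a sketch; the URL is illustrative):
#
#   curl -X POST http://localhost:8010/humanaudio \
#        -H 'Content-Type: application/json' \
#        -d '{"sessionid": 123456, "file_url": "http://example.com/voice.mp3"}'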
async def set_audiotype(request):
try:
params = await request.json()
sessionid = params.get('sessionid',0)
nerfreals[sessionid].set_custom_state(params['audiotype'],params['reinit'])
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
async def record(request):
try:
params = await request.json()
sessionid = params.get('sessionid',0)
if params['type']=='start_record':
# nerfreals[sessionid].put_msg_txt(params['text'])
nerfreals[sessionid].start_recording()
elif params['type']=='end_record':
nerfreals[sessionid].stop_recording()
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data":"ok"}
),
)
except Exception as e:
logger.exception('exception:')
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": -1, "msg": str(e)}
),
)
async def is_speaking(request):
params = await request.json()
sessionid = params.get('sessionid',0)
return web.Response(
content_type="application/json",
text=json.dumps(
{"code": 0, "data": nerfreals[sessionid].is_speaking()}
),
)
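# Payload shapes for the remaining session endpoints (from the handlers above):
#   /interrupt_talk   {"sessionid": 123456}
#   /set_audiotype    {"sessionid": 123456, "audiotype": <state>, "reinit": <bool>}
#   /record           {"sessionid": 123456, "type": "start_record" | "end_record"}
#   /is_speaking      {"sessionid": 123456}  ->  {"code": 0, "data": <bool>}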
async def on_shutdown(app):
# close peer connections
coros = [pc.close() for pc in pcs]
await asyncio.gather(*coros)
pcs.clear()
async def post(url,data):
try:
async with aiohttp.ClientSession() as session:
async with session.post(url,data=data) as response:
return await response.text()
    except aiohttp.ClientError as e:
        logger.error(f'Error: {e}')
async def run(push_url,sessionid):
nerfreal = await asyncio.get_event_loop().run_in_executor(None, build_nerfreal,sessionid)
nerfreals[sessionid] = nerfreal
pc = RTCPeerConnection()
pcs.add(pc)
@pc.on("connectionstatechange")
async def on_connectionstatechange():
logger.info("Connection state is %s" % pc.connectionState)
if pc.connectionState == "failed":
await pc.close()
pcs.discard(pc)
player = HumanPlayer(nerfreals[sessionid])
audio_sender = pc.addTrack(player.audio)
video_sender = pc.addTrack(player.video)
await pc.setLocalDescription(await pc.createOffer())
answer = await post(push_url,pc.localDescription.sdp)
await pc.setRemoteDescription(RTCSessionDescription(sdp=answer,type='answer'))
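# run() implements the rtcpush path: the server creates its own offer and POSTs
# the raw SDP to the ingest URL (e.g. an SRS WHIP endpoint, as in the default
# --push_url), then applies the response body as the remote answer.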
##########################################
# os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
# os.environ['MULTIPROCESSING_METHOD'] = 'forkserver'
if __name__ == '__main__':
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument('--pose', type=str, default="data/data_kf.json", help="transforms.json, pose source")
parser.add_argument('--au', type=str, default="data/au.csv", help="eye blink area")
parser.add_argument('--torso_imgs', type=str, default="", help="torso images path")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye")
parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use")
parser.add_argument('--workspace', type=str, default='data/video')
parser.add_argument('--seed', type=int, default=0)
### training options
parser.add_argument('--ckpt', type=str, default='data/pretrained/ngp_kf.pth')
parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)")
### loss set
parser.add_argument('--warmup_step', type=int, default=10000, help="warm up steps")
parser.add_argument('--amb_aud_loss', type=int, default=1, help="use ambient aud loss")
parser.add_argument('--amb_eye_loss', type=int, default=1, help="use ambient eye loss")
parser.add_argument('--unc_loss', type=int, default=1, help="use uncertainty loss")
parser.add_argument('--lambda_amb', type=float, default=1e-4, help="lambda for ambient loss")
### network backbone options
parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training")
parser.add_argument('--bg_img', type=str, default='white', help="background image")
parser.add_argument('--fbg', action='store_true', help="frame-wise bg")
parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes")
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye")
parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence")
parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform")
### dataset options
parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)")
parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.")
# (the default value is for the fox dataset)
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.")
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3")
parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location")
parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)")
parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)")
parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable")
parser.add_argument('--init_lips', action='store_true', help="init lips region")
parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region")
    parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in an exponential decay way...")
parser.add_argument('--torso', action='store_true', help="fix head and train torso")
parser.add_argument('--head_ckpt', type=str, default='', help="head model")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=450, help="GUI width")
parser.add_argument('--H', type=int, default=450, help="GUI height")
parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
### else
parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)")
parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)")
parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits")
parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off")
parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size")
parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")
# asr
parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
parser.add_argument('--asr_play', action='store_true', help="play out the audio")
#parser.add_argument('--asr_model', type=str, default='deepspeech')
parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') #
# parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')
# parser.add_argument('--asr_model', type=str, default='facebook/hubert-large-ls960-ft')
parser.add_argument('--asr_save_feats', action='store_true')
# audio FPS
parser.add_argument('--fps', type=int, default=50)
# sliding window left-middle-right length (unit: 20ms)
parser.add_argument('-l', type=int, default=10)
parser.add_argument('-m', type=int, default=8)
parser.add_argument('-r', type=int, default=10)
parser.add_argument('--fullbody', action='store_true', help="fullbody human")
parser.add_argument('--fullbody_img', type=str, default='data/fullbody/img')
parser.add_argument('--fullbody_width', type=int, default=580)
parser.add_argument('--fullbody_height', type=int, default=1080)
parser.add_argument('--fullbody_offset_x', type=int, default=0)
parser.add_argument('--fullbody_offset_y', type=int, default=0)
#musetalk opt
parser.add_argument('--avatar_id', type=str, default='avator_1')
parser.add_argument('--bbox_shift', type=int, default=5)
parser.add_argument('--batch_size', type=int, default=16)
# parser.add_argument('--customvideo', action='store_true', help="custom video")
# parser.add_argument('--customvideo_img', type=str, default='data/customvideo/img')
# parser.add_argument('--customvideo_imgnum', type=int, default=1)
parser.add_argument('--customvideo_config', type=str, default='')
parser.add_argument('--tts', type=str, default='edgetts') #xtts gpt-sovits cosyvoice
parser.add_argument('--REF_FILE', type=str, default=None)
parser.add_argument('--REF_TEXT', type=str, default=None)
parser.add_argument('--TTS_SERVER', type=str, default='http://127.0.0.1:9880') # http://localhost:9000
# parser.add_argument('--CHARACTER', type=str, default='test')
# parser.add_argument('--EMOTION', type=str, default='default')
parser.add_argument('--model', type=str, default='ernerf') #musetalk wav2lip
    parser.add_argument('--gpu', type=int, default=0, help="index of the GPU to use, e.g. 0 for the first GPU, 1 for the second")
parser.add_argument('--transport', type=str, default='rtcpush') #rtmp webrtc rtcpush
parser.add_argument('--push_url', type=str, default='http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream') #rtmp://localhost/live/livestream
parser.add_argument('--max_session', type=int, default=1) #multi session count
parser.add_argument('--listenport', type=int, default=8010)
opt = parser.parse_args()
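    # Example launches (a sketch using the flags defined above):
    #   python app.py --transport webrtc --model wav2lip --avatar_id avator_1
    #   python app.py --transport rtcpush --model musetalk --max_session 2 \
    #       --push_url 'http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream'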
#app.config.from_object(opt)
#print(app.config)
opt.customopt = []
if opt.customvideo_config!='':
with open(opt.customvideo_config,'r') as file:
opt.customopt = json.load(file)
if opt.model == 'ernerf':
from nerfreal import NeRFReal,load_model,load_avatar
model = load_model(opt)
avatar = load_avatar(opt)
# we still need test_loader to provide audio features for testing.
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = NeRFReal(opt, trainer, test_loader,audio_processor,audio_model)
# nerfreals.append(nerfreal)
elif opt.model == 'musetalk':
from musereal import MuseReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model()
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,model)
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = MuseReal(opt,audio_processor,vae, unet, pe,timesteps)
# nerfreals.append(nerfreal)
elif opt.model == 'wav2lip':
from lipreal import LipReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model("./models/wav2lip.pth", opt.gpu)
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,model,256)
# for k in range(opt.max_session):
# opt.sessionid=k
# nerfreal = LipReal(opt,model)
# nerfreals.append(nerfreal)
elif opt.model == 'ultralight':
from lightreal import LightReal,load_model,load_avatar,warm_up
logger.info(opt)
model = load_model(opt)
avatar = load_avatar(opt.avatar_id)
warm_up(opt.batch_size,avatar,160)
if opt.transport=='rtmp':
thread_quit = Event()
nerfreals[0] = build_nerfreal(0)
rendthrd = Thread(target=nerfreals[0].render,args=(thread_quit,))
rendthrd.start()
#############################################################################
appasync = web.Application()
appasync.on_shutdown.append(on_shutdown)
appasync.router.add_post("/offer", offer)
appasync.router.add_post("/human", human)
appasync.router.add_post("/humanaudio", humanaudio)
appasync.router.add_post("/set_audiotype", set_audiotype)
appasync.router.add_post("/record", record)
appasync.router.add_post("/interrupt_talk", interrupt_talk)
appasync.router.add_post("/is_speaking", is_speaking)
appasync.router.add_get("/ws", websocket_handler)
appasync.router.add_static('/',path='web')
# Configure default CORS settings.
cors = aiohttp_cors.setup(appasync, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
# Configure CORS on all routes.
for route in list(appasync.router.routes()):
cors.add(route)
pagename='webrtcapi.html'
if opt.transport=='rtmp':
pagename='echoapi.html'
elif opt.transport=='rtcpush':
pagename='rtcpushapi.html'
logger.info('start http server; http://<serverip>:'+str(opt.listenport)+'/'+pagename)
    logger.info('To use webrtc, the integrated webrtc frontend is recommended: http://<serverip>:'+str(opt.listenport)+'/dashboard.html')
def run_server(runner):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(runner.setup())
site = web.TCPSite(runner, '0.0.0.0', opt.listenport)
loop.run_until_complete(site.start())
if opt.transport=='rtcpush':
for k in range(opt.max_session):
push_url = opt.push_url
if k!=0:
push_url = opt.push_url+str(k)
loop.run_until_complete(run(push_url,k))
loop.run_forever()
#Thread(target=run_server, args=(web.AppRunner(appasync),)).start()
run_server(web.AppRunner(appasync))
#app.on_shutdown.append(on_shutdown)
#app.router.add_post("/offer", offer)
# print('start websocket server')
# server = pywsgi.WSGIServer(('0.0.0.0', 8000), app, handler_class=WebSocketHandler)
# server.serve_forever()