ly0303521

Decouple TurboDiffusion from the business code; standardize the port configuration.

Shared port configuration:

@@ -1,16 +1,16 @@
 # Shared Configuration
 PUBLIC_IP="106.120.52.146"
 
-# Public Ports (External Access)
+# Ports (External Access)
+LOCAL_BACKEND_PORT="7000"
 PUBLIC_BACKEND_PORT="37000"
+LOCAL_FRONTEND_PORT="7001"
 PUBLIC_FRONTEND_PORT="37001"
-PUBLIC_TURBO_PORT="37002"
-PUBLIC_OSS_PORT="34000"
+LOCAL_TURBO_PORT="8000"
+PUBLIC_TURBO_PORT="38000"
+PUBLIC_OSS_PORT="39997"
 PUBLIC_ZIMAGE_PORT="39009"
 
-# Local Ports (Internal Bind)
-LOCAL_BACKEND_PORT="7000"
-LOCAL_FRONTEND_PORT="7001"
 
 # Business Logic Configuration
 VIDEO_GENERATION_LIMIT="1"
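
The scheme pairs each internal bind port with a public counterpart at a fixed +30000 offset (7000 -> 37000, 7001 -> 37001, 8000 -> 38000). A small sketch to sanity-check that apparent convention; the `config.sh` filename is a placeholder for wherever this shared configuration actually lives:

    import re

    # Collect LOCAL_*/PUBLIC_* port pairs from the shared config
    pairs = {}
    with open("config.sh", encoding="utf-8") as f:
        for line in f:
            m = re.match(r'(LOCAL|PUBLIC)_(\w+)_PORT="(\d+)"', line.strip())
            if m:
                scope, name, port = m.groups()
                pairs.setdefault(name, {})[scope] = int(port)

    # BACKEND, FRONTEND and TURBO should each report an offset of 30000;
    # OSS and ZIMAGE have no local counterpart and are skipped.
    for name, ports in sorted(pairs.items()):
        if "LOCAL" in ports and "PUBLIC" in ports:
            print(name, ports["PUBLIC"] - ports["LOCAL"])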
Backend proxy (FastAPI):

@@ -10,7 +10,7 @@ from threading import Lock, RLock
 from typing import List, Literal, Optional, Dict, Any
 
 import httpx
-from fastapi import FastAPI, HTTPException, Query
+from fastapi import FastAPI, HTTPException, Query, UploadFile, File, Form
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel, Field, ConfigDict
 import logging
@@ -26,6 +26,10 @@ GALLERY_MAX_ITEMS = int(os.getenv("GALLERY_MAX_ITEMS", "500"))
 WHITELIST_PATH = Path(os.getenv("WHITELIST_PATH", Path(__file__).with_name("whitelist.txt")))
 ADMIN_ID = "86427531"
 
+# Base URL of the local TurboDiffusion inference service, overridable via env
+TURBO_DIFFUSION_LOCAL_URL = os.getenv("TURBO_DIFFUSION_LOCAL_URL", "http://localhost:8000").rstrip("/")
+
+
 # Load dynamic limits from config.js
 CONFIG_JS_PATH = Path(__file__).parent.parent / "public" / "config.js"
 
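A quick illustration of the override path and of why the `rstrip("/")` matters; the URL value mirrors what the start script exports further down:

    import os

    # Deployments export this before starting the backend; a trailing slash is tolerated
    os.environ["TURBO_DIFFUSION_LOCAL_URL"] = "http://127.0.0.1:8000/"

    base = os.getenv("TURBO_DIFFUSION_LOCAL_URL", "http://localhost:8000").rstrip("/")
    assert f"{base}/submit-job/" == "http://127.0.0.1:8000/submit-job/"  # no double slash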
@@ -58,19 +62,27 @@ LIMITS = load_limits_from_config()
 class UsageStore:
     def __init__(self, path: Path):
         self.path = path
+        self.lock_path = path.with_suffix(".lock_ai")
         if not self.path.exists():
             self._write({})
 
     def _read(self) -> dict:
+        lock_file = open(self.lock_path, "w")
         try:
+            fcntl.flock(lock_file, fcntl.LOCK_EX)
             if not self.path.exists(): return {}
             with self.path.open("r", encoding="utf-8") as f:
                 return json.load(f)
         except (FileNotFoundError, json.JSONDecodeError):
             return {}
+        finally:
+            fcntl.flock(lock_file, fcntl.LOCK_UN)
+            lock_file.close()
 
     def _write(self, data: dict):
+        lock_file = open(self.lock_path, "w")
         try:
+            fcntl.flock(lock_file, fcntl.LOCK_EX)
             payload = json.dumps(data, ensure_ascii=False, indent=2)
             temp_path = self.path.with_suffix(".tmp_proxy")
             with temp_path.open("w", encoding="utf-8") as f:
@@ -78,6 +90,9 @@ class UsageStore:
             temp_path.replace(self.path)
         except Exception as e:
             logger.error(f"Failed to write usage: {e}")
+        finally:
+            fcntl.flock(lock_file, fcntl.LOCK_UN)
+            lock_file.close()
 
     def get_usage(self, user_id: str) -> dict:
         data = self._read()
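For reference, the locking pattern used here in isolation: an exclusive flock on a sidecar lock file serializes readers and writers across processes, while the temp-file-plus-replace step keeps the JSON file from ever being observed half-written. A minimal sketch, assuming `import fcntl` at the top of the module (the import is not visible in this hunk):

    import fcntl
    import json
    from pathlib import Path

    def locked_write(path: Path, data: dict) -> None:
        lock_path = path.with_suffix(".lock")
        with open(lock_path, "w") as lock_file:
            fcntl.flock(lock_file, fcntl.LOCK_EX)  # blocks until no other process holds it
            try:
                tmp = path.with_suffix(".tmp")
                tmp.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
                tmp.replace(path)  # atomic on POSIX filesystems
            finally:
                fcntl.flock(lock_file, fcntl.LOCK_UN)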
@@ -90,6 +105,23 @@ class UsageStore:
             user_data["last_reset"] = today
         return user_data
 
+    def increment_used(self, user_id: str):
+        data = self._read()
+        import datetime
+        today = datetime.date.today().isoformat()
+        user_data = data.get(user_id, {"daily_used": 0, "bonus_count": 0, "last_reset": today})
+        if user_data.get("last_reset") != today:
+            user_data["daily_used"] = 0
+            user_data["last_reset"] = today
+
+        if user_data["daily_used"] < LIMITS["VIDEO_GENERATION_LIMIT"]:
+            user_data["daily_used"] += 1
+        else:
+            user_data["bonus_count"] = max(0, user_data.get("bonus_count", 0) - 1)
+
+        data[user_id] = user_data
+        self._write(data)
+
     def update_bonus(self, user_id: str, delta: int):
         data = self._read()
         import datetime
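`increment_used` consumes the daily allowance first and only then spends bonus credits, which matches the admission check in the submit endpoint below (`daily_used < limit + bonus_count`). A hypothetical trace, assuming the module's `UsageStore`, an illustrative path, and `VIDEO_GENERATION_LIMIT = 1`:

    store = UsageStore(Path("usage.json"))
    store.update_bonus("10773758", +2)   # grant two bonus credits

    store.increment_used("10773758")     # daily_used: 0 -> 1 (daily quota now full)
    store.increment_used("10773758")     # bonus_count: 2 -> 1
    store.increment_used("10773758")     # bonus_count: 1 -> 0
    # A fourth submission is rejected: daily_used (1) >= limit (1) + bonus_count (0)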
@@ -141,6 +173,8 @@ class GalleryImage(GalleryItem):
 class GalleryVideo(GalleryItem):
     generation_time: Optional[float] = Field(default=None, alias="generationTime")
     seed: Optional[int] = Field(default=None, ge=0)
+    width: int = Field(1024, ge=64, le=2048)
+    height: int = Field(1024, ge=64, le=2048)
 
 class ImageGenerationResponse(BaseModel):
     image: Optional[str] = None
@@ -345,6 +379,85 @@ async def get_user_usage(user_id: str):
         "is_admin": user_id == ADMIN_ID
     }
 
+@app.post("/submit-video-job/")
+async def submit_video_job_proxy(
+    prompt: str = Form(...),
+    image: UploadFile = File(...),
+    author_id: str = Form(...),
+    num_steps: Optional[int] = Form(4),
+    seed: Optional[int] = Form(0)
+):
+
+    # 1. Check usage before forwarding
+    if author_id != ADMIN_ID:
+        usage = usage_store.get_usage(author_id)
+        limit = LIMITS["VIDEO_GENERATION_LIMIT"]
+        allowed = limit + usage.get("bonus_count", 0)
+        if usage["daily_used"] >= allowed:
+            logger.warning(f"User {author_id} limit reached via proxy")
+            raise HTTPException(status_code=403, detail="今日生成次数已用完。点赞灵感图库的图片可增加次数!")
+
+    # 2. Forward the job to TurboDiffusion
+    url = f"{TURBO_DIFFUSION_LOCAL_URL}/submit-job/"
+
+    # Re-package the upload for the multipart forward
+    file_content = await image.read()
+    files = {
+        'image': (image.filename, file_content, image.content_type)
+    }
+
+    # Pass the real author_id through; the inference server no longer validates it
+    data = {
+        'prompt': prompt,
+        'author_id': author_id,
+        'num_steps': str(num_steps),
+        'seed': str(seed)
+    }
+
+    try:
+        # A dedicated client keeps the multipart forward simple, rather than
+        # reusing the shared app-state client
+        async with httpx.AsyncClient(timeout=30.0) as client:
+            resp = await client.post(url, data=data, files=files)
+
+        if resp.status_code not in (200, 202):
+            raise HTTPException(status_code=resp.status_code, detail=f"Inference service error: {resp.text}")
+
+        result = resp.json()
+
+        # 3. Increment usage only on success
+        if author_id != ADMIN_ID:
+            usage_store.increment_used(author_id)
+
+        return result
+
+    except httpx.RequestError as exc:
+        raise HTTPException(status_code=502, detail=f"Video Inference Service unreachable: {exc}")
+    except HTTPException:
+        raise
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail=f"Proxy error: {exc}")
+
+@app.get("/video-status/{task_id}")
+async def get_video_status_proxy(task_id: str):
+    url = f"{TURBO_DIFFUSION_LOCAL_URL}/status/{task_id}"
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            resp = await client.get(url)
+
+        if resp.status_code == 404:
+            raise HTTPException(status_code=404, detail="Task not found")
+        if resp.status_code != 200:
+            raise HTTPException(status_code=resp.status_code, detail=f"Inference service error: {resp.text}")
+
+        return resp.json()
+    except httpx.RequestError as exc:
+        raise HTTPException(status_code=502, detail=f"Video Inference Service unreachable: {exc}")
+    except HTTPException:
+        raise
+    except Exception as exc:
+        raise HTTPException(status_code=500, detail=f"Proxy error: {exc}")
+
 @app.get("/gallery/images")
 async def gallery_images(limit: int = Query(200, ge=1, le=1000), author_id: Optional[str] = Query(None, alias="authorId")):
     items = image_store.list_items()
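A minimal Python client sketch against the two proxy endpoints, using httpx. The `task_id` key and the terminal status values are assumptions about the TurboDiffusion response, which the proxy passes through unchanged:

    import time
    import httpx

    API_BASE_URL = "http://106.120.52.146:37000"  # PUBLIC_BACKEND_PORT

    def generate_video(prompt: str, image_path: str, author_id: str) -> dict:
        # Submit through the backend proxy so the quota check applies
        with open(image_path, "rb") as f:
            resp = httpx.post(
                f"{API_BASE_URL}/submit-video-job/",
                data={"prompt": prompt, "author_id": author_id, "num_steps": "4", "seed": "0"},
                files={"image": ("input.png", f, "image/png")},
                timeout=30.0,
            )
        resp.raise_for_status()
        task_id = resp.json()["task_id"]  # assumed field name

        # Poll the status proxy until the job reaches a terminal state
        while True:
            status = httpx.get(f"{API_BASE_URL}/video-status/{task_id}", timeout=10.0).json()
            if status.get("status") in ("completed", "failed"):  # assumed values
                return status
            time.sleep(3)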
Usage store data:

@@ -1,12 +1,12 @@
 {
   "10773758": {
-    "daily_used": 4,
-    "bonus_count": 7,
-    "last_reset": "2026-01-20"
+    "daily_used": 1,
+    "bonus_count": 8,
+    "last_reset": "2026-01-23"
   },
   "11110000": {
-    "daily_used": 2,
-    "bonus_count": 2,
-    "last_reset": "2026-01-20"
+    "daily_used": 1,
+    "bonus_count": 0,
+    "last_reset": "2026-01-23"
   }
 }
Frontend config (public/config.js):

@@ -1,7 +1,7 @@
 window.APP_CONFIG = {
   Z_IMAGE_DIRECT_BASE_URL: "http://106.120.52.146:39009",
-  TURBO_DIFFUSION_API_URL: "http://106.120.52.146:37002",
-  VIDEO_OSS_BASE_URL: "http://106.120.52.146:34000",
+  TURBO_DIFFUSION_API_URL: "http://106.120.52.146:38000",
+  VIDEO_OSS_BASE_URL: "http://106.120.52.146:39997",
   API_BASE_URL: "http://106.120.52.146:37000",
   VIDEO_GENERATION_LIMIT: 1,
   LIKES_FOR_REWARD: 5
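(The start script regenerates this file from the shared port variables via the heredoc shown below, so the committed copy and the generated one must agree after any port change.)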
Start script:

@@ -50,6 +50,9 @@ window.APP_CONFIG = {
   LIKES_FOR_REWARD: ${LIKES_FOR_REWARD:-5}
 };
 EOF
+export TURBO_DIFFUSION_LOCAL_URL="http://127.0.0.1:$LOCAL_TURBO_PORT"
+export VITE_API_BASE_URL="http://$PUBLIC_IP:$PUBLIC_BACKEND_PORT"
+export WHITELIST_PATH="/home/inspur/work_space/gen_img_video/TurboDiffusion-Space/TurboDiffusion/front_backend_zImage/backend/whitelist.txt"
 
 echo "Configuration generated."
 
@@ -93,8 +96,6 @@ fi
 
 rotate_log "$BASE_DIR/frontend.log"
 
-export VITE_API_BASE_URL="http://$PUBLIC_IP:$PUBLIC_BACKEND_PORT"
-
 nohup npm run dev -- --port "$LOCAL_FRONTEND_PORT" --host 0.0.0.0 > ../frontend.log 2>&1 &
 echo "Frontend started with PID: $!"
 
App component:

@@ -3,7 +3,7 @@ import { ImageItem, ImageGenerationParams, UserProfile, VideoStatus, UserUsage }
 import { SHOWCASE_IMAGES, ADMIN_ID, VIDEO_OSS_BASE_URL } from './constants';
 import { generateImage } from './services/imageService';
 import { submitVideoJob, pollVideoStatus } from './services/videoService';
-import { fetchGallery, fetchVideoGallery, saveVideo, toggleLike, deleteVideo, fetchUsage } from './services/galleryService';
+import { fetchGallery, fetchVideoGallery, saveVideo, toggleLike, deleteVideo, fetchUsage, incrementUsage } from './services/galleryService';
 import MasonryGrid from './components/MasonryGrid';
 import InputBar from './components/InputBar';
 import HistoryBar from './components/HistoryBar';
@@ -155,6 +155,7 @@ const App: React.FC = () => {
 
       const savedVideo = await saveVideo(newVideoData);
       setVideos(prev => [savedVideo, ...prev]);
+
       syncUsage();
 
       setTimeout(() => {
@@ -163,7 +164,19 @@ const App: React.FC = () => {
       }, 3000);
     } catch (err: any) {
       console.error(err);
-      setError("视频生成失败。请确保视频生成服务正常运行。");
+      let errorMessage = "视频生成失败。请确保视频生成服务正常运行。";
+      if (err.message && err.message.includes("Job submission failed")) {
+        const match = err.message.match(/Job submission failed: (.*)/);
+        if (match && match[1]) {
+          try {
+            // The detail may arrive as a JSON body or as plain text
+            errorMessage = JSON.parse(match[1]).detail || match[1];
+          } catch (e) {
+            errorMessage = match[1];
+          }
+        }
+      }
+      setError(errorMessage);
       setVideoStatus(null);
       setIsGeneratingVideo(false);
     }
InputBar component:

@@ -333,6 +333,7 @@ const InputBar: React.FC<InputBarProps> = ({ onGenerate, isGenerating, incomingP
       )}
 
       <button
+        type="button"
         onClick={handleGenerate}
         disabled={isGenerating || isSubmittingLocal}
         className={`
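(Without an explicit type, a button inside a form defaults to type="submit", so a click could also trigger a native form submission and page reload; type="button" limits it to the onClick handler.)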
services/galleryService:

@@ -100,4 +100,7 @@ export const deleteVideo = async (itemId: string, userId: string): Promise<void>
     const errorText = await response.text();
     throw new Error(`Delete failed (${response.status}): ${errorText}`);
   }
-};
+};
+
+// No-op placeholder: usage is now incremented server-side by the /submit-video-job/ proxy
+export const incrementUsage = async (userId: string): Promise<void> => {};
services/videoService:

@@ -1,4 +1,4 @@
-import { TURBO_DIFFUSION_API_URL } from '../constants';
+import { API_BASE_URL, TURBO_DIFFUSION_API_URL } from '../constants';
 import { VideoStatus } from '../types';
 
 const compressImage = async (file: File): Promise<File> => {
@@ -65,13 +65,25 @@ export const submitVideoJob = async (prompt: string, image: File, authorId: stri
   formData.append('author_id', authorId);
   formData.append('seed', seed.toString());
 
-  const submitRes = await fetch(`${TURBO_DIFFUSION_API_URL}/submit-job/`, {
+  // Go through the backend proxy instead of calling TurboDiffusion directly
+  const submitRes = await fetch(`${API_BASE_URL}/submit-video-job/`, {
     method: 'POST',
     body: formData,
   });
 
   if (!submitRes.ok) {
     const errorText = await submitRes.text();
+    // Surface the JSON `detail` field when present; note the throw happens
+    // outside the try/catch so it is not swallowed by the JSON-parse catch
+    let detail: string | null = null;
+    try {
+      detail = JSON.parse(errorText).detail ?? null;
+    } catch (e) {
+      // not JSON; fall back to the raw text
+    }
+    if (detail) {
+      throw new Error(`Job submission failed: ${detail}`);
+    }
     throw new Error(`Job submission failed: ${errorText}`);
   }
 
@@ -95,7 +107,8 @@ export const pollVideoStatus = (
 
   const interval = setInterval(async () => {
     try {
-      const res = await fetch(`${TURBO_DIFFUSION_API_URL}/status/${taskId}`);
+      // Poll through the backend proxy as well, avoiding CORS/port exposure
+      const res = await fetch(`${API_BASE_URL}/video-status/${taskId}`);
       if (!res.ok) {
         if (res.status === 502 || res.status === 504) {
           console.warn(`Gateway timeout (${res.status}), retrying poll...`);