Repository: 卢阳 / front_backend_zImage
Commit: a60723fea5c1597b81a69beb7f1857d098fe598a (a60723fe), 1 parent: 99677130
Author: ly0303521
Date: 2026-01-23 17:43:12 +0800

Decouple TurboDiffusion from the business code; standardize the port settings.
(Original message: "TurboDiffusion与业务代码解耦。规范端口设置")
Showing 10 changed files with 168 additions and 23 deletions:
.env.sh
backend/main.py
backend/usage.json
backend/usage.lock_ai
public/config.js
start_all.sh
z-image-generator/App.tsx
z-image-generator/components/InputBar.tsx
z-image-generator/services/galleryService.ts
z-image-generator/services/videoService.ts

diff --git a/.env.sh b/.env.sh
 # Shared Configuration
 PUBLIC_IP="106.120.52.146"

-# Public Ports (External Access)
-LOCAL_BACKEND_PORT="7000"
+# Ports (External Access)
 PUBLIC_BACKEND_PORT="37000"
-LOCAL_FRONTEND_PORT="7001"
 PUBLIC_FRONTEND_PORT="37001"
-PUBLIC_TURBO_PORT="37002"
-PUBLIC_OSS_PORT="34000"
+LOCAL_TURBO_PORT="8000"
+PUBLIC_TURBO_PORT="38000"
+PUBLIC_OSS_PORT="39997"
+PUBLIC_ZIMAGE_PORT="39009"
+
+# Local Ports (Internal Bind)
+LOCAL_BACKEND_PORT="7000"
+LOCAL_FRONTEND_PORT="7001"

 # Business Logic Configuration
 VIDEO_GENERATION_LIMIT="1"
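
The point of the reorganized file is the LOCAL_*/PUBLIC_* split: each service binds to its LOCAL_* port on the box, while browsers reach it through the PUBLIC_* port that the gateway maps onto PUBLIC_IP. For reference, a minimal Python sketch of that convention (not part of the commit; the helper name and defaults are illustrative):

    import os

    def resolve_urls(name: str, local_default: str, public_default: str):
        """Bind address for the service vs. the URL browsers should use."""
        local = os.getenv(f"LOCAL_{name}_PORT", local_default)
        public = os.getenv(f"PUBLIC_{name}_PORT", public_default)
        ip = os.getenv("PUBLIC_IP", "106.120.52.146")
        return f"http://127.0.0.1:{local}", f"http://{ip}:{public}"

    # e.g. TurboDiffusion binds on 8000 locally and is published on 38000:
    print(resolve_urls("TURBO", "8000", "38000"))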

diff --git a/backend/main.py b/backend/main.py
@@ -10,7 +10,7 @@ from threading import Lock, RLock
 from typing import List, Literal, Optional, Dict, Any
 import httpx
-from fastapi import FastAPI, HTTPException, Query
+from fastapi import FastAPI, HTTPException, Query, UploadFile, File, Form
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel, Field, ConfigDict
 import logging
@@ -26,6 +26,10 @@ GALLERY_MAX_ITEMS = int(os.getenv("GALLERY_MAX_ITEMS", "500"))
 WHITELIST_PATH = Path(os.getenv("WHITELIST_PATH", Path(__file__).with_name("whitelist.txt")))
 ADMIN_ID = "86427531"

+# --- Add this line to fix the port ---
+TURBO_DIFFUSION_LOCAL_URL = os.getenv("TURBO_DIFFUSION_LOCAL_URL", "http://localhost:8000").rstrip("/")
+
 # Load dynamic limits from config.js
 CONFIG_JS_PATH = Path(__file__).parent.parent / "public" / "config.js"
@@ -58,19 +62,27 @@ LIMITS = load_limits_from_config()
 class UsageStore:
     def __init__(self, path: Path):
         self.path = path
+        self.lock_path = path.with_suffix(".lock_ai")
         if not self.path.exists():
             self._write({})

     def _read(self) -> dict:
+        lock_file = open(self.lock_path, "w")
         try:
+            fcntl.flock(lock_file, fcntl.LOCK_EX)
+            if not self.path.exists():
+                return {}
             with self.path.open("r", encoding="utf-8") as f:
                 return json.load(f)
         except (FileNotFoundError, json.JSONDecodeError):
             return {}
+        finally:
+            fcntl.flock(lock_file, fcntl.LOCK_UN)
+            lock_file.close()

     def _write(self, data: dict):
+        lock_file = open(self.lock_path, "w")
         try:
+            fcntl.flock(lock_file, fcntl.LOCK_EX)
             payload = json.dumps(data, ensure_ascii=False, indent=2)
             temp_path = self.path.with_suffix(".tmp_proxy")
             with temp_path.open("w", encoding="utf-8") as f:
@@ -78,6 +90,9 @@ class UsageStore:
             temp_path.replace(self.path)
         except Exception as e:
             logger.error(f"Failed to write usage: {e}")
+        finally:
+            fcntl.flock(lock_file, fcntl.LOCK_UN)
+            lock_file.close()

     def get_usage(self, user_id: str) -> dict:
         data = self._read()
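
The locking scheme is the heart of these hunks: every read and write first takes an exclusive flock on a sidecar lock file (usage.lock_ai, committed below as an empty file), and writes go through a temp file followed by an atomic replace. A standalone sketch of the same pattern, with hypothetical names (locked_update, the .lock/.tmp suffixes):

    import fcntl
    import json
    from pathlib import Path

    def locked_update(path: Path, mutate) -> None:
        # flock() a sidecar lock file, not the data file itself, so the
        # atomic replace below never invalidates the locked inode.
        lock_path = path.with_suffix(".lock")
        with open(lock_path, "w") as lock_file:
            fcntl.flock(lock_file, fcntl.LOCK_EX)   # exclusive: one process at a time
            try:
                data = json.loads(path.read_text("utf-8")) if path.exists() else {}
                mutate(data)
                tmp = path.with_suffix(".tmp")
                tmp.write_text(json.dumps(data, ensure_ascii=False, indent=2), "utf-8")
                tmp.replace(path)                   # atomic rename: readers never see half a file
            finally:
                fcntl.flock(lock_file, fcntl.LOCK_UN)

    locked_update(Path("usage.json"), lambda d: d.setdefault("demo", {"daily_used": 0}))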
@@ -90,6 +105,23 @@ class UsageStore:
             user_data["last_reset"] = today
         return user_data

+    def increment_used(self, user_id: str):
+        data = self._read()
+        import datetime
+        today = datetime.date.today().isoformat()
+        user_data = data.get(user_id, {"daily_used": 0, "bonus_count": 0, "last_reset": today})
+        if user_data.get("last_reset") != today:
+            user_data["daily_used"] = 0
+            user_data["last_reset"] = today
+        if user_data["daily_used"] < LIMITS["VIDEO_GENERATION_LIMIT"]:
+            user_data["daily_used"] += 1
+        else:
+            user_data["bonus_count"] = max(0, user_data.get("bonus_count", 0) - 1)
+        data[user_id] = user_data
+        self._write(data)
+
     def update_bonus(self, user_id: str, delta: int):
         data = self._read()
         import datetime
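
increment_used encodes the quota rule: the first VIDEO_GENERATION_LIMIT jobs of a day consume daily_used, anything beyond that burns one bonus_count (earned via gallery likes), clamped at zero. A worked trace with the shipped limit of 1:

    LIMIT = 1                                   # VIDEO_GENERATION_LIMIT in .env.sh
    user = {"daily_used": 0, "bonus_count": 2}  # shape matches usage.json entries

    for job in range(1, 5):
        if user["daily_used"] < LIMIT:
            user["daily_used"] += 1             # job 1: consumes the daily quota
        else:
            user["bonus_count"] = max(0, user["bonus_count"] - 1)  # jobs 2-3: bonus
        print(job, user)
    # Job 4 is never reached in practice: the endpoint's check
    # `daily_used >= limit + bonus_count` rejects it with a 403 first;
    # increment_used itself only clamps bonus_count at zero.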
@@ -141,6 +173,8 @@ class GalleryImage(GalleryItem):
 class GalleryVideo(GalleryItem):
     generation_time: Optional[float] = Field(default=None, alias="generationTime")
+    seed: Optional[int] = Field(default=None, ge=0)
+    width: int = Field(1024, ge=64, le=2048)
+    height: int = Field(1024, ge=64, le=2048)

 class ImageGenerationResponse(BaseModel):
     image: Optional[str] = None
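
These fields lean on pydantic's Field aliasing and range constraints: generationTime arrives camelCased from the frontend while the attribute stays snake_case, and width/height are bounds-checked at parse time. A minimal sketch of that behavior (pydantic v2 assumed, matching the ConfigDict import above; populate_by_name is an assumption):

    from typing import Optional
    from pydantic import BaseModel, ConfigDict, Field, ValidationError

    class Demo(BaseModel):
        model_config = ConfigDict(populate_by_name=True)  # accept alias or field name
        generation_time: Optional[float] = Field(default=None, alias="generationTime")
        width: int = Field(1024, ge=64, le=2048)

    print(Demo(generationTime=2.5))      # generation_time=2.5 width=1024
    try:
        Demo(width=99999)                # violates le=2048
    except ValidationError as e:
        print(e.errors()[0]["type"])     # 'less_than_equal'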
@@ -345,6 +379,86 @@ async def get_user_usage(user_id: str):
         "is_admin": user_id == ADMIN_ID
     }

+@app.post("/submit-video-job/")
+async def submit_video_job_proxy(
+    prompt: str = Form(...),
+    image: UploadFile = File(...),
+    author_id: str = Form(...),
+    num_steps: Optional[int] = Form(4),
+    seed: Optional[int] = Form(0)
+):
+    # 1. Check Usage
+    if author_id != ADMIN_ID:
+        usage = usage_store.get_usage(author_id)
+        limit = LIMITS["VIDEO_GENERATION_LIMIT"]
+        allowed = (limit + usage.get("bonus_count", 0))
+        if usage["daily_used"] >= allowed:
+            logger.warning(f"User {author_id} limit reached via proxy")
+            # Detail string: "Today's generation quota is used up. Like images in the inspiration gallery to earn more!"
+            raise HTTPException(status_code=403, detail="今日生成次数已用完。点赞灵感图库的图片可增加次数!")
+
+    # 2. Forward to TurboDiffusion
+    url = f"{TURBO_DIFFUSION_LOCAL_URL}/submit-job/"
+    # Prepare files and data for forwarding
+    file_content = await image.read()
+    files = {'image': (image.filename, file_content, image.content_type)}
+    # Use the actual author_id now that we've disabled the check on the inference server
+    data = {'prompt': prompt, 'author_id': author_id, 'num_steps': str(num_steps), 'seed': str(seed)}
+
+    try:
+        # Use a separate client or the app state client (but need to handle multipart)
+        # Using separate httpx call for simplicity with files
+        async with httpx.AsyncClient(timeout=30.0) as client:
+            resp = await client.post(url, data=data, files=files)
+        if resp.status_code != 202 and resp.status_code != 200:
+            raise HTTPException(status_code=resp.status_code, detail=f"Inference service error: {resp.text}")
+        result = resp.json()
+        # 3. Increment Usage (Only if successful)
+        if author_id != ADMIN_ID:
+            usage_store.increment_used(author_id)
+        return result
+    except httpx.RequestError as exc:
+        raise HTTPException(status_code=502, detail=f"Video Inference Service unreachable: {exc}")
+    except Exception as exc:
+        # If we caught an HTTPException above, re-raise it
+        if isinstance(exc, HTTPException):
+            raise exc
+        raise HTTPException(status_code=500, detail=f"Proxy error: {exc}")
+
+@app.get("/video-status/{task_id}")
+async def get_video_status_proxy(task_id: str):
+    url = f"{TURBO_DIFFUSION_LOCAL_URL}/status/{task_id}"
+    try:
+        async with httpx.AsyncClient(timeout=10.0) as client:
+            resp = await client.get(url)
+        if resp.status_code == 404:
+            raise HTTPException(status_code=404, detail="Task not found")
+        if resp.status_code != 200:
+            raise HTTPException(status_code=resp.status_code, detail=f"Inference service error: {resp.text}")
+        return resp.json()
+    except httpx.RequestError as exc:
+        raise HTTPException(status_code=502, detail=f"Video Inference Service unreachable: {exc}")
+    except Exception as exc:
+        if isinstance(exc, HTTPException):
+            raise exc
+        raise HTTPException(status_code=500, detail=f"Proxy error: {exc}")
+
 @app.get("/gallery/images")
 async def gallery_images(limit: int = Query(200, ge=1, le=1000),
                          author_id: Optional[str] = Query(None, alias="authorId")):
     items = image_store.list_items()
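
With these two proxy endpoints in place, the frontend only ever talks to the backend port; TurboDiffusion stays on localhost. A hypothetical end-to-end smoke test (Python/httpx; the task_id field name and status values are assumptions inferred from the /status/{task_id} route, and the image path is a placeholder):

    import time
    import httpx

    BASE = "http://106.120.52.146:37000"   # PUBLIC_BACKEND_PORT from .env.sh

    with open("input.jpg", "rb") as f:     # any local test image
        resp = httpx.post(
            f"{BASE}/submit-video-job/",
            data={"prompt": "a cat surfing", "author_id": "10773758",
                  "num_steps": "4", "seed": "0"},
            files={"image": ("input.jpg", f, "image/jpeg")},
            timeout=60.0,
        )
    resp.raise_for_status()                 # a 403 here means the daily quota is spent
    task_id = resp.json()["task_id"]        # field name assumed

    while True:                             # poll via the proxy, mirroring pollVideoStatus
        status = httpx.get(f"{BASE}/video-status/{task_id}", timeout=10.0).json()
        print(status)
        if status.get("status") in ("completed", "failed"):  # values assumed
            break
        time.sleep(3)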

diff --git a/backend/usage.json b/backend/usage.json
 {
     "10773758": {
-        "daily_used": 4,
-        "bonus_count": 7,
-        "last_reset": "2026-01-20"
+        "daily_used": 1,
+        "bonus_count": 8,
+        "last_reset": "2026-01-23"
     },
     "11110000": {
-        "daily_used": 2,
-        "bonus_count": 2,
-        "last_reset": "2026-01-20"
+        "daily_used": 1,
+        "bonus_count": 0,
+        "last_reset": "2026-01-23"
     }
 }
\ No newline at end of file

diff --git a/backend/usage.lock_ai b/backend/usage.lock_ai
new file mode 100644 (empty sidecar lock file used by UsageStore)

diff --git a/public/config.js b/public/config.js
 window.APP_CONFIG = {
     Z_IMAGE_DIRECT_BASE_URL: "http://106.120.52.146:39009",
-    TURBO_DIFFUSION_API_URL: "http://106.120.52.146:37002",
-    VIDEO_OSS_BASE_URL: "http://106.120.52.146:34000",
+    TURBO_DIFFUSION_API_URL: "http://106.120.52.146:38000",
+    VIDEO_OSS_BASE_URL: "http://106.120.52.146:39997",
     API_BASE_URL: "http://106.120.52.146:37000",
     VIDEO_GENERATION_LIMIT: 1,
     LIKES_FOR_REWARD: 5

diff --git a/start_all.sh b/start_all.sh
@@ -50,6 +50,9 @@ window.APP_CONFIG = {
   LIKES_FOR_REWARD: ${LIKES_FOR_REWARD:-5}
 };
 EOF
+export TURBO_DIFFUSION_LOCAL_URL="http://127.0.0.1:$LOCAL_TURBO_PORT"
+export VITE_API_BASE_URL="http://$PUBLIC_IP:$PUBLIC_BACKEND_PORT"
+export WHITELIST_PATH="/home/inspur/work_space/gen_img_video/TurboDiffusion-Space/TurboDiffusion/front_backend_zImage/backend/whitelist.txt"

 echo "Configuration generated."
@@ -93,8 +96,6 @@ fi
 rotate_log "$BASE_DIR/frontend.log"
-export VITE_API_BASE_URL="http://$PUBLIC_IP:$PUBLIC_BACKEND_PORT"
-
 nohup npm run dev -- --port "$LOCAL_FRONTEND_PORT" --host 0.0.0.0 > ../frontend.log 2>&1 &
 echo "Frontend started with PID: $!"
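
The export is hoisted above the service launches because backend/main.py reads TURBO_DIFFUSION_LOCAL_URL once at import time; exporting it after the backend starts would have no effect. A quick check of what the backend will see (run in the same shell, after the script's exports):

    import os

    # Mirrors the lookup in backend/main.py; with start_all.sh's export and
    # LOCAL_TURBO_PORT=8000 this prints http://127.0.0.1:8000.
    print(os.getenv("TURBO_DIFFUSION_LOCAL_URL", "http://localhost:8000").rstrip("/"))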

diff --git a/z-image-generator/App.tsx b/z-image-generator/App.tsx
@@ -3,7 +3,7 @@ import { ImageItem, ImageGenerationParams, UserProfile, VideoStatus, UserUsage }
 import { SHOWCASE_IMAGES, ADMIN_ID, VIDEO_OSS_BASE_URL } from './constants';
 import { generateImage } from './services/imageService';
 import { submitVideoJob, pollVideoStatus } from './services/videoService';
-import { fetchGallery, fetchVideoGallery, saveVideo, toggleLike, deleteVideo, fetchUsage } from './services/galleryService';
+import { fetchGallery, fetchVideoGallery, saveVideo, toggleLike, deleteVideo, fetchUsage, incrementUsage } from './services/galleryService';
 import MasonryGrid from './components/MasonryGrid';
 import InputBar from './components/InputBar';
 import HistoryBar from './components/HistoryBar';
@@ -155,6 +155,7 @@ const App: React.FC = () => {
       const savedVideo = await saveVideo(newVideoData);
       setVideos(prev => [savedVideo, ...prev]);
+      syncUsage();
       setTimeout(() => {
@@ -163,7 +164,21 @@
       }, 3000);
     } catch (err: any) {
       console.error(err);
-      setError("视频生成失败。请确保视频生成服务正常运行。");
+      // Fallback message: "Video generation failed. Please make sure the video generation service is running."
+      let errorMessage = "视频生成失败。请确保视频生成服务正常运行。";
+      if (err.message && err.message.includes("Job submission failed")) {
+        try {
+          const match = err.message.match(/Job submission failed: (.*)/);
+          if (match && match[1]) {
+            const errorJson = JSON.parse(match[1]);
+            if (errorJson.detail) {
+              errorMessage = errorJson.detail;
+            }
+          }
+        } catch (e) {
+          errorMessage = err.message;
+        }
+      }
+      setError(errorMessage);
       setVideoStatus(null);
       setIsGeneratingVideo(false);
     }

diff --git a/z-image-generator/components/InputBar.tsx b/z-image-generator/components/InputBar.tsx
@@ -333,6 +333,7 @@ const InputBar: React.FC<InputBarProps> = ({ onGenerate, isGenerating, incomingP
       )}
       <button
         type="button"
+        onClick={handleGenerate}
         disabled={isGenerating || isSubmittingLocal}
         className={`

diff --git a/z-image-generator/services/galleryService.ts b/z-image-generator/services/galleryService.ts
@@ -100,4 +100,6 @@ export const deleteVideo = async (itemId: string, userId: string): Promise<void>
     const errorText = await response.text();
     throw new Error(`Delete failed (${response.status}): ${errorText}`);
   }
-};
\ No newline at end of file
+};
+
+// No-op stub: usage is now incremented server-side by the backend proxy.
+export const incrementUsage = async (userId: string): Promise<void> => {};
\ No newline at end of file

diff --git a/z-image-generator/services/videoService.ts b/z-image-generator/services/videoService.ts
-import { TURBO_DIFFUSION_API_URL } from '../constants';
+import { API_BASE_URL, TURBO_DIFFUSION_API_URL } from '../constants';
 import { VideoStatus } from '../types';

 const compressImage = async (file: File): Promise<File> => {
@@ -65,13 +65,23 @@ export const submitVideoJob = async (prompt: string, image: File, authorId: stri
   formData.append('author_id', authorId);
   formData.append('seed', seed.toString());

-  const submitRes = await fetch(`${TURBO_DIFFUSION_API_URL}/submit-job/`, {
+  // Use the Backend Proxy instead of direct TurboDiffusion call
+  const submitRes = await fetch(`${API_BASE_URL}/submit-video-job/`, {
     method: 'POST',
     body: formData,
   });

   if (!submitRes.ok) {
     const errorText = await submitRes.text();
+    // Try to parse detailed JSON error if possible
+    // (Note: the throw below is swallowed by this block's own catch, so in
+    // practice the raw JSON errorText propagates and App.tsx re-parses it.)
+    try {
+      const errorJson = JSON.parse(errorText);
+      if (errorJson.detail) {
+        throw new Error(`Job submission failed: ${errorJson.detail}`);
+      }
+    } catch (e) {
+      // ignore json parse error
+    }
     throw new Error(`Job submission failed: ${errorText}`);
   }
@@ -95,7 +105,9 @@ export const pollVideoStatus = (
   const interval = setInterval(async () => {
     try {
-      const res = await fetch(`${TURBO_DIFFUSION_API_URL}/status/${taskId}`);
+      // Use the Backend Proxy for status polling as well to avoid CORS/port issues
+      // We need to add this endpoint to the backend proxy
+      const res = await fetch(`${API_BASE_URL}/video-status/${taskId}`);
       if (!res.ok) {
         if (res.status === 502 || res.status === 504) {
           console.warn(`Gateway timeout (${res.status}), retrying poll...`);