diff --git "a/__lib__/app.py" "b/__lib__/app.py" --- "a/__lib__/app.py" +++ "b/__lib__/app.py" @@ -9,7 +9,7 @@ import util from util import (process_image_edit, process_text_to_image, process_image_upscale, process_face_swap, process_multi_image_edit, process_watermark_removal, download_and_check_result_nsfw, GoodWebsiteUrl, RateLimitConfig, create_mask_from_layers, - TASK_LOCK_MINUTES, PRINT_STATS_INTERVAL) + TASK_LOCK_MINUTES, PRINT_STATS_INTERVAL, NSFW_BLUR_SECONDS) from nfsw import NSFWDetector @@ -18,30 +18,8 @@ rate_limit_config = RateLimitConfig() # ============================================================================= -# i18n 翻译系统 - Load from encrypted modules +# IP归属地判断 - 保留用于限速和统计 # ============================================================================= -import sys -from pathlib import Path - -# Add i18n module to path -_i18n_module_path = Path(__file__).parent / "__lib__" / "i18n" -if str(_i18n_module_path) not in sys.path: - sys.path.insert(0, str(_i18n_module_path)) - -# Import encrypted i18n loader -from i18n import translations as _translations -translations = _translations - -def load_translations(): - """Compatibility function - translations are already loaded""" - return translations - -def t(key, lang="en"): - default_en = translations.get("en", {}) - lang_dict = translations.get(lang) or default_en - if key in lang_dict: - return lang_dict[key] - return default_en.get(key, key) def has_active_task(client_ip): @@ -368,16 +346,36 @@ def check_nsfw_for_result(result_url, country, current_count): return False -def create_nsfw_blurred_response(result_url, redirect_url, lang): +def create_nsfw_blurred_response(result_url, redirect_url): """ - 创建NSFW模糊处理后的响应HTML + 创建NSFW模糊处理后的响应HTML - 使用CSS filter动画实现延迟模糊效果(时间由NSFW_BLUR_SECONDS配置) 返回: (result_html, action_html) 或 None 如果模糊失败 """ - blurred_image = apply_gaussian_blur_to_image_url(result_url) - if blurred_image is None: - return None + # 创建唯一ID + unique_id = f"nsfw_{int(time.time() * 1000)}" 
+ + # 计算动画时间点(在总时长中的百分比) + # 前N秒保持清晰,然后在1秒内过渡到模糊 + total_duration = NSFW_BLUR_SECONDS + 1 + clear_percentage = (NSFW_BLUR_SECONDS / total_duration) * 100 + + # 使用CSS filter实现模糊效果,从清晰到模糊的动画 + result_html = f""" + +
{t('seo_unlimited_desc', lang)}
+Access unlimited image generation, video creation, and advanced editing features. No time limits, no ads, no watermark.
896×896 in under 10 seconds on RTX 5090 (32GB VRAM)
+4–8 NFE with Karras sigmas + RK2/RK4
3-second 320p video in 30 seconds
+Video latents + shared attention improve consistency
Real-time inference performance achieved
+Heun/RK4 + Flow Matching trajectory integration
- Omni Creator 2.0 is an 8-billion parameter native Multi-Modal Diffusion Transformer (MM-DiT). - It unifies Text-to-Image, high-fidelity pixel-level editing, and Image-to-Video generation inside a single differentiable architecture. -
-- Existing approaches often split into specialized systems: DiT-style generators can be strong for static synthesis but struggle with temporal coherence, while MLLM-based editors follow instructions well but may lose pixel-level fidelity. - Omni Creator 2.0 bridges both by design. -
-+ 8B-parameter native MM-DiT unifying T2I, pixel-level editing, and I2V generation. + Uses CLIP/T5-style text encoders and visual conditioners to deliver high-fidelity multi-modal results with shared transformer backbone. +
+ + +Multi-head attention with timestep-conditioned modulation
+Learned fusion of text, 3× images, and temporal context
+FP8 + RoPE + RMSNorm for production-scale inference
++ Sampling framework that couples Flow Matching, Karras sigmas, and Heun/RK4 ODE solvers for fast 4–8 step generation. +
+ + +Supports velocity/epsilon/sample prediction with flow-to-velocity conversion, enabling 4–8 step inference.
+Coarse-to-fine pipeline: ~70% of steps for the coarse draft, optional refine passes for details.
+Runge-Kutta 4th order solver with flow matching conditioning for optimal trajectory evolution.
+Given data distribution p₁(x) and noise distribution p₀(z) = N(0, I), the Rectified Flow defines:
+The velocity field v_θ(x_t, t) is trained to match the conditional velocity:
+At inference, we solve the ODE: dx/dt = v_θ(x_t, t) from t=0 to t=1
++ Dynamically balancing text, image references, and temporal context through learned gating mechanisms. +
+ +| Model | +Params | +Architecture | +Training | +Inference | +NFE | +Acceleration | +
|---|---|---|---|---|---|---|
| FLUX.2-Dev | +32B | +DiT + MM | +Flow Matching | +Euler/DPM | +50 | +FP8 + FlashAttn | +
| Qwen-Image | +20B | +DiT + MLLM | +Rectified Flow | +FlowMatch Euler | +30-50 | +Lightning (4-8步) | +
| Qwen-Image-Edit | +20B | +DiT + Dual-Branch | +Flow Matching | +Euler | +28-50 | +Prompt Rewrite | +
| HunyuanVideo | +13B+ | +AsymmDiT | +Diffusion | +Multi-step | +50+ | +FP8 + Multi-frame | +
| Wan2.2 | +5B/14B | +DiT + MoE | +Diffusion | +Multi-step | +30-50 | +MoE Routing + FP8 | +
| Z-Image-Turbo | +6B | +Distilled DiT | +Progressive Distill | +Few-step | +4-8 | +Teacher-Student | +
| Mochi | +10B | +Video DiT | +Diffusion | +Multi-step | +50+ | +ComfyUI Parallel | +
| ⭐ Omni Creator 2.0 | +8B | +MM-DiT + AMG | +π-Flow + FM | +RK4 Hybrid | +4-8 | +Policy Distill + Multi-Stage | +