🔄 卡若AI 同步 2026-03-20 21:38 | 更新:水桥平台对接、卡木、总索引与入口、运营中枢工作台 | 排除 >20MB: 11 个

This commit is contained in:
2026-03-20 21:38:28 +08:00
parent 241d139ea9
commit a8e5dcce7e
14 changed files with 740 additions and 105 deletions

View File

@@ -1,6 +1,6 @@
{
"access_token": "u-dFTTY7qHFbSHNUsIRa4NqClh1ez1ghohVMGaZxk0274E",
"refresh_token": "ur-ePrUCxaTV8ipHNjZ.XICSYlh3A11ghOjr0GaVwk0271J",
"access_token": "u-elJqv3tWdeGrDSyG63Up4Vlh1KzxghqXN0GaENk0260E",
"refresh_token": "ur-eNhTsg0X9f5a7kk6iFBlqDlh1e1xghOrNwGaJB40261Y",
"name": "飞书用户",
"auth_time": "2026-03-20T11:02:06.903826"
"auth_time": "2026-03-20T21:02:49.258622"
}

View File

@@ -240,3 +240,27 @@
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/soul 派对 120场 20260320_output/成片/深度AI模型对比 哪个才是真正的AI不是语言模型.mp4", "title": "深度对比各大AI模型哪个才是真正的智能而不只是语言模型", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.21168994903564453, "timestamp": "2026-03-20 05:40:07"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/soul 派对 120场 20260320_output/成片/疗愈师配AI助手能收多少钱 一个小团队5万到10万.mp4", "title": "疗愈师+AI助手组合一个小团队月收5万到10万", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.18154072761535645, "timestamp": "2026-03-20 05:40:10"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/soul 派对 120场 20260320_output/成片/赚钱没那么复杂,自信心才是核心问题.mp4", "title": "获得收益真没那么复杂,自信心才是卡住你的核心问题", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.19428515434265137, "timestamp": "2026-03-20 05:40:13"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/127场推流273万进房4万6流量密码就这几个.mp4", "title": "场推流273万进房4万6流量密码就这几个", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 4.230232000350952, "timestamp": "2026-03-20 16:37:06"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/不学AI连工作都找不到了学历不重要干货和AI实操最重要.mp4", "title": "不学AI连工作都找不到了学历不重要干货和AI实操最重要", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 2.2930691242218018, "timestamp": "2026-03-20 16:37:12"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/保镖服务后端才是真赚钱,接触高端客户赚信任关系的钱.mp4", "title": "保镖服务后端才是真获得收益,接触高端客户赚信任关系的钱", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.320681095123291, "timestamp": "2026-03-20 16:37:15"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/分对方他挣不到的钱,员工才愿意跟你干.mp4", "title": "分对方他挣不到的钱,员工才愿意跟你干", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.489332914352417, "timestamp": "2026-03-20 16:37:19"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/想拿2万工资AI月消耗至少1000块这是硬指标.mp4", "title": "想拿2万工资AI月消耗至少1000块这是硬指标", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 1.1175451278686523, "timestamp": "2026-03-20 16:37:23"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/流量端和交付端别同时扛,缺流量就做群主开派对.mp4", "title": "流量端和交付端别同时扛,缺流量就做群主开派对", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.36455702781677246, "timestamp": "2026-03-20 16:37:26"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/游戏辅助AI模型高收益高风险3到6个月必须收手.mp4", "title": "游戏辅助AI模型高收益高风险3到6个月必须收手", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.8829331398010254, "timestamp": "2026-03-20 16:37:30"}
{"platform": "抖音", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/面试三面流程 简历+测试+试岗300份筛到2到3个人.mp4", "title": "面试三面流程 简历+测试+试岗300份筛到2到3个人", "success": false, "status": "error", "message": "Cookie 已过期", "elapsed_sec": 0.15245604515075684, "timestamp": "2026-03-20 16:37:33"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/127场推流273万进房4万6流量密码就这几个.mp4", "title": "场推流273万进房4万6流量密码就这几个", "success": false, "status": "failed", "message": "Playwright: 未找到上传控件", "elapsed_sec": 6.741008043289185, "timestamp": "2026-03-20 16:37:19"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/不学AI连工作都找不到了学历不重要干货和AI实操最重要.mp4", "title": "不学AI连工作都找不到了学历不重要干货和AI实操最重要", "success": true, "status": "reviewing", "message": "纯API投稿成功 (7.4s)", "elapsed_sec": 7.3929102420806885, "timestamp": "2026-03-20 16:37:30"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/保镖服务后端才是真赚钱,接触高端客户赚信任关系的钱.mp4", "title": "保镖服务后端才是真获得收益,接触高端客户赚信任关系的钱", "success": true, "status": "reviewing", "message": "纯API投稿成功 (2.2s)", "elapsed_sec": 2.208482027053833, "timestamp": "2026-03-20 16:37:35"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/分对方他挣不到的钱,员工才愿意跟你干.mp4", "title": "分对方他挣不到的钱,员工才愿意跟你干", "success": true, "status": "reviewing", "message": "纯API投稿成功 (3.4s)", "elapsed_sec": 3.445220947265625, "timestamp": "2026-03-20 16:37:42"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/想拿2万工资AI月消耗至少1000块这是硬指标.mp4", "title": "想拿2万工资AI月消耗至少1000块这是硬指标", "success": true, "status": "reviewing", "message": "纯API投稿成功 (2.0s)", "elapsed_sec": 1.978193998336792, "timestamp": "2026-03-20 16:37:47"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/流量端和交付端别同时扛,缺流量就做群主开派对.mp4", "title": "流量端和交付端别同时扛,缺流量就做群主开派对", "success": true, "status": "reviewing", "message": "纯API投稿成功 (2.1s)", "elapsed_sec": 2.136302947998047, "timestamp": "2026-03-20 16:37:52"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/游戏辅助AI模型高收益高风险3到6个月必须收手.mp4", "title": "游戏辅助AI模型高收益高风险3到6个月必须收手", "success": true, "status": "reviewing", "message": "纯API投稿成功 (2.5s)", "elapsed_sec": 2.5074386596679688, "timestamp": "2026-03-20 16:37:57"}
{"platform": "B站", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/面试三面流程 简历+测试+试岗300份筛到2到3个人.mp4", "title": "面试三面流程 简历+测试+试岗300份筛到2到3个人", "success": true, "status": "reviewing", "message": "纯API投稿成功 (2.9s)", "elapsed_sec": 2.86631178855896, "timestamp": "2026-03-20 16:38:03"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/127场推流273万进房4万6流量密码就这几个.mp4", "title": "场推流273万进房4万6流量密码就这几个", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 54.56508994102478, "timestamp": "2026-03-20 16:38:24"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/不学AI连工作都找不到了学历不重要干货和AI实操最重要.mp4", "title": "不学AI连工作都找不到了学历不重要干货和AI实操最重要", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 58.11296081542969, "timestamp": "2026-03-20 16:39:49"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/保镖服务后端才是真赚钱,接触高端客户赚信任关系的钱.mp4", "title": "保镖服务后端才是真获得收益,接触高端客户赚信任关系的钱", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 57.71816396713257, "timestamp": "2026-03-20 16:41:13"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/分对方他挣不到的钱,员工才愿意跟你干.mp4", "title": "分对方他挣不到的钱,员工才愿意跟你干", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 50.6455979347229, "timestamp": "2026-03-20 16:42:31"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/想拿2万工资AI月消耗至少1000块这是硬指标.mp4", "title": "想拿2万工资AI月消耗至少1000块这是硬指标", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 49.91238975524902, "timestamp": "2026-03-20 16:43:47"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/流量端和交付端别同时扛,缺流量就做群主开派对.mp4", "title": "流量端和交付端别同时扛,缺流量就做群主开派对", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 49.58630394935608, "timestamp": "2026-03-20 16:45:03"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/游戏辅助AI模型高收益高风险3到6个月必须收手.mp4", "title": "游戏辅助AI模型高收益高风险3到6个月必须收手", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 49.622374057769775, "timestamp": "2026-03-20 16:46:20"}
{"platform": "小红书", "video_path": "/Users/karuo/Movies/soul视频/第127场_20260318_output/成片/面试三面流程 简历+测试+试岗300份筛到2到3个人.mp4", "title": "面试三面流程 简历+测试+试岗300份筛到2到3个人", "success": true, "status": "likely_published", "message": "发布按钮+确认已点击,视频可能仍在处理", "screenshot": "/tmp/xhs_result.png", "elapsed_sec": 49.556177854537964, "timestamp": "2026-03-20 16:47:36"}

View File

@@ -2,9 +2,9 @@
name: 视频切片
description: Soul派对视频切片 + 快速混剪 + 切片动效包装(片头/片尾/程序化)+ 剪映思路借鉴(智能剪口播/镜头分割)。触发词含视频剪辑、切片发布、快速混剪、切片动效包装、程序化包装、片头片尾。
group: 木
triggers: 视频剪辑、切片发布、字幕烧录、全画面标定、竖屏裁剪、飞书录屏白边、**快速混剪、混剪预告、快剪串联、切片动效包装、程序化包装、片头片尾、批量封面、视频包装**、镜头切分、场景检测
triggers: 视频剪辑、切片发布、字幕烧录、全画面标定、竖屏裁剪、飞书录屏白边、**快速混剪、混剪预告、快剪串联、切片动效包装、程序化包装、片头片尾、批量封面、视频包装**、镜头切分、场景检测、**运营短切片、15秒切片、30秒切片、京剧梗、热点密度切片**
owner: 木叶
version: "1.4"
version: "1.5"
updated: "2026-03-20"
---
@@ -24,7 +24,20 @@ updated: "2026-03-20"
提取后立即繁转简+修正错误 封面+字幕(已简体)+加速10%+去语气词
```
**切片时长**每段为**完整的一个片段**,时长 **30 秒300 秒**,由该完整片段起止时间决定。**标题**用一句**刺激性观点**(见 `Soul竖屏切片_SKILL.md`)。
**切片时长(两种模式)**
| 模式 | 单段时长 | 条数/场(建议) | 选题侧重 |
|------|-----------|-----------------|----------|
| **深度切片(默认)** | **30 秒300 秒**,完整语义单元 | 610 | 提问→回答、整场观点 |
| **运营短切片** | **1530 秒**(可 `--min-duration` / `--max-duration` 微调) | **2030**(默认脚本 **24** | **京剧/戏曲比喻梗**、**当场热点词**、强反差金句,适合抖音高密度测试 |
运营短切片流程与深度切片相同(转录 → `identify_highlights``batch_clip``soul_enhance`),区别在 **高光 preset****prompt**`identify_highlights.py --preset ops-short` 会在提示词中要求模型**整场均匀取点**,并优先京剧相关比喻/唱腔梗与热点表达;过滤逻辑会**丢弃**短于 15 秒或长于 30 秒的区间(深度模式只卡最短 60 秒、不卡上限)。
**开场 ASR 噪声**:派对录播常在开场出现同一短句循环(如「我看你不太好」),会把模型注意力锁死在前几分钟。**ops-short 默认**将送模型的文字稿与成片时间轴**从约 7:30450 秒)之后**才开始(`--prompt-min-sec`,可改)。若你的场次正片明显更早开始,可改小该值或临时改 `long` 再人工筛 `highlights.json`
**批量节奏(人工剪辑对齐)**:一场录播可先按 **1530 条**为一轮做高光与切片,再进成片;多轮叠加时注意 `highlights.json` 备份,避免覆盖。
**标题**:深度模式用一句**刺激性观点**;短切片标题 **410 个汉字** 为宜(见 `Soul竖屏切片_SKILL.md`)。
**提问→回答 结构**若片段内有人提问前3秒优先展示**提问问题**,再播回答;高光识别填 `question``hook_3sec` 与之一致,成片整条去语助词。详见 `参考资料/视频结构_提问回答与高光.md``参考资料/高光识别提示词.md`
@@ -54,6 +67,15 @@ cd 03_卡木/木叶_视频内容/视频切片/脚本
conda activate mlx-whisper
python3 soul_slice_pipeline.py --video "/path/to/soul派对会议第57场.mp4" --clips 6
# 运营短切片1530 秒 × 约 24 条,京剧梗+热点优先,两目录+竖屏成片)
python3 soul_slice_pipeline.py -v "视频.mp4" -o "/path/to/场次_output" --two-folders --ops-short --prefix soul127
# 已转录场次仅重跑高光+切片+成片(省 MLX
python3 soul_slice_pipeline.py -v "视频.mp4" -o "/path/to/场次_output" --two-folders --ops-short --skip-transcribe --prefix soul127
# 自定义条数与时长区间
python3 soul_slice_pipeline.py -v "视频.mp4" -o "/path/to/out" --two-folders --highlight-preset ops-short -n 28 --min-clip-sec 10 --max-clip-sec 30
# 仅重新烧录(字幕转简体后重跑增强)
python3 soul_slice_pipeline.py -v "视频.mp4" -n 6 --skip-transcribe --skip-highlights --skip-clips
@@ -61,6 +83,14 @@ python3 soul_slice_pipeline.py -v "视频.mp4" -n 6 --skip-transcribe --skip-hig
python3 soul_slice_pipeline.py -v "视频.mp4" -n 8 --two-folders --quick-montage
```
**分步:仅高光(运营短切片)**
```bash
python3 identify_highlights.py -t transcript.srt -o highlights.json --preset ops-short -n 24
# 或显式时长 + 长视频也强调京剧/热点:
python3 identify_highlights.py -t transcript.srt -o highlights.json --preset ops-short -n 26 --min-duration 15 --max-duration 30 --ops-jingju-hotspot
```
流程:**转录 → 字幕转简体 → 高光识别 → 批量切片 → 增强**
#### 分步命令
@@ -155,6 +185,8 @@ python3 analyze_feishu_ui_crop.py "/path/to/原片.mp4" --at 0.2
将输出的 `CROP_VF` 传给:`python3 soul_enhance.py ... --vertical --crop-vf '...'``OVERLAY_X` 脚本会一并打印;也可用 `--overlay-x` 覆盖)。
**全画面入画(不裁竖条)**:加 `--vertical-fit-full`,整幅 16:9 缩放入 498×1080 + 上下黑边,左右内容都可见。详见 `Soul竖屏切片_SKILL.md` 第六节 B。
详见:`参考资料/竖屏中段裁剪参数说明.md``脚本/analyze_feishu_ui_crop.py`
**FFmpeg 一条命令(固定参数):**
@@ -350,7 +382,7 @@ python3 scripts/burn_subtitles_clean.py -i enhanced.mp4 -s clean.srt -o 成片.m
| **kill_ffmpeg_when_clip_done.py** | 剪辑结束后自动关掉 ffmpeg监视剪映/PID 或立即杀) | ⭐ 按需 |
| **scene_detect_to_highlights.py** | 镜头/场景检测 → highlights.jsonPySceneDetect可接 batch_clip | ⭐⭐ |
| chapter_themes_to_highlights.py | 按章节 .md 主题提取片段本地模型→highlights.json | ⭐⭐⭐ |
| identify_highlights.py | 高光识别API 优先→Ollama→规则,默认 gpt-4o | ⭐⭐ |
| identify_highlights.py | 高光识别API→Ollama→规则`--preset ops-short` 为 1530 秒运营密度 | ⭐⭐ |
| batch_clip.py | 批量切片 | ⭐⭐ |
| one_video.py | 单视频一键成片 | ⭐⭐ |
| burn_subtitles_clean.py | 字幕烧录(无阴影) | ⭐ |

View File

@@ -1,7 +1,7 @@
---
name: Soul竖屏切片
description: Soul 派对视频→竖屏成片498×1080剪辑→成片两文件夹竖屏裁剪以全画面 1920×1080 标定analyze_feishu_ui_crop.py默认深色带 crop=568@508+居中498、无右侧白边。MLX 转录→高光→batch_clip→soul_enhance封面+字幕同步+逐字可选+去语助词+纠错+违禁词→visual_enhance v8 可选。LTX/基因胶囊可选。
triggers: Soul竖屏切片、视频切片、热点切片、竖屏成片、派对切片、全画面标定、竖屏裁剪、白边、飞书录屏、LTX、AI生成视频、Retake重剪、字幕优化、字幕同步、逐字字幕
triggers: Soul竖屏切片、视频切片、热点切片、竖屏成片、派对切片、全画面标定、竖屏裁剪、全画面成片、letterbox、画面显示全、白边、飞书录屏、LTX、AI生成视频、Retake重剪、字幕优化、字幕同步、逐字字幕
owner: 木叶
group: 木
version: "1.4"
@@ -72,7 +72,7 @@ python3 analyze_feishu_ui_crop.py "/path/to/全画面.jpg"
|----|------|
| **单段时长** | **30300 秒**,由完整片段起止决定 |
| **完整性** | 每段是一个完整话题/情节,有头有尾 |
| **标题** | **一句刺激性观点**(金句、反常识、结论句) |
| **标题** | **一句刺激性观点****46 个汉字**为宜(单行封面好读、主题一眼懂);忌长句当文件名 |
| **数量** | 建议 ≤10 段/场 |
| **语助词** | 识别与剪辑须符合 `参考资料/高光识别提示词.md`,成片由 soul_enhance 统一去语助词 |
@@ -81,7 +81,8 @@ python3 analyze_feishu_ui_crop.py "/path/to/全画面.jpg"
## 五、成片:封面 + 字幕 + 竖屏
- **封面**:竖屏 498×1080 内**不超出界面****半透明质感**(背景 alpha=165深色渐变、左上角 Soul logo**封面显示标题 = 成片文件名 = highlights.title**(去杠、去下划线后一致,无 `:|—/_`、无序号);标题文字严格居中、多行自动换行。透明度由 `VERTICAL_COVER_ALPHA` 调节。
- **字幕**:封面结束后才显示,**居中**在竖屏内先尝试**单次 FFmpeg 通道**(一次 pass 完成所有字幕叠加最快若失败自动回退到分批模式batch_size=40语助词在解析阶段已由 `clean_filler_words` 去除。重新加字幕时加 `--force-burn-subs`。⚠️ 注意:当前 FFmpeg 不支持 drawtext/subtitles 滤镜,只能用 PIL 图像 overlay 方案。
- **字幕**:封面结束后先留**约 3 秒纯画面**(无字幕),再开始叠字幕;字幕**居中**在竖屏内先尝试**单次 FFmpeg 通道**(一次 pass 完成所有字幕叠加最快若失败自动回退到分批模式batch_size=40语助词在解析阶段已由 `clean_filler_words` 去除。重新加字幕时加 `--force-burn-subs`。⚠️ 注意:当前 FFmpeg 不支持 drawtext/subtitles 滤镜,只能用 PIL 图像 overlay 方案。(脚本常量:`SUBS_START_AFTER_COVER_SEC`,默认 3.0
- **封面标题**:高光 `title` 建议 **46 个汉字**;成片内封面主标题最多显示 **6 个汉字**(超长由 `soul_enhance` 自动截断,与文件名 `--title-only` 一致)。
- **竖屏**498×1080crop 参数与 `参考资料/竖屏中段裁剪参数说明.md` 一致
### ⚠️ 字幕烧录常见坑(已修复)
@@ -98,14 +99,26 @@ python3 analyze_feishu_ui_crop.py "/path/to/全画面.jpg"
---
## 六、竖屏裁剪参数(成片内嵌)
## 六、竖屏输出两种模式(成片内嵌)
### A. 竖条模式(默认,小程序无白边)
只取横向**中间深色带**,再裁成 498 宽,适合抖音全屏铺满、不要桌面白边。
| 步骤 | 滤镜 |
|------|------|
| 1 | crop=568:1080:508:0整段深色小程序主体不含右侧桌面白边 |
| 2 | crop=498:1080:35:0568 内水平居中取 498 |
**输出**498×1080 竖屏。
### B. 全画面模式(`--vertical-fit-full`
**不裁中间竖条**:整幅 16:9 **完整入画**,等比缩放到宽度 498**上下加黑边** 到 1080 高。左侧小程序 + 右侧人像/桌面都会在画面里,适合「画面要显示全」的成片。
- 封面、字幕先在 **完整横版分辨率** 上叠加(`overlay=0:0`),再整体走:
`scale=w=498:h=1080:force_original_aspect_ratio=decrease,pad=498:1080:(ow-iw)/2:(oh-ih)/2:color=black`
- 命令:在原有 `soul_enhance.py ... --vertical --title-only` 上增加 **`--vertical-fit-full`**
**输出**:两种模式均为 **498×1080** 竖屏文件。
---

View File

@@ -121,7 +121,7 @@ CTA的目的是引导用户完成下一步动作。
[
{
"rank": 1,
"title": "简短有力的标题(用于短视频",
"title": "46 个汉字的刺激性观点(用于短视频封面/文件名,单一主题",
"start_time": "00:12:34",
"end_time": "00:13:56",
"duration_sec": 82,
@@ -192,7 +192,7 @@ CTA的目的是引导用户完成下一步动作。
- 优先:有步骤、有起伏、起承转合清晰的片段
- 避免:大段碎碎念、断句混乱、同一句话重复多遍、长时间无信息量停顿
- 与主题片段提取规则一致:每段为完整语义单元,时长 30300 秒,标题为一句刺激性观点
- 与主题片段提取规则一致:每段为完整语义单元,时长 30300 秒**title 固定 46 个汉字**,一句刺激性观点、单一主题(用于竖屏封面字)
## 文字稿格式要求

View File

@@ -0,0 +1,82 @@
[
{
"title": "高薪先看消耗",
"start_time": "00:07:30",
"end_time": "00:12:00",
"hook_3sec": "想两万月薪先看AI月烧多少",
"question": "高薪硬指标是什么?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "想拿2万工资AI月消耗至少1000块以上。消耗多说明你是实实在在用AI在解决职业里的问题",
"reason": "TOKEN消耗硬指标"
},
{
"title": "辅助暴利快收",
"start_time": "00:12:00",
"end_time": "00:18:00",
"hook_3sec": "游戏辅助来钱快,定性也狠",
"question": "做游戏AI能挣多少",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "三角洲辅助瞄准AI训练人物识别模型。闲鱼抖音直播分销单价500块一个人一个月",
"reason": "高收益高风险"
},
{
"title": "保镖钱在后端",
"start_time": "00:18:00",
"end_time": "00:23:00",
"hook_3sec": "真赚的不是保镖费,是后端",
"question": "保镖怎么赚大钱?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "初级2万一个月中级3万高级4万。真正赚钱的是商务中介、介绍投资、拉业务合作",
"reason": "信任关系变现"
},
{
"title": "别两头扛流量",
"start_time": "00:23:00",
"end_time": "00:28:00",
"hook_3sec": "流量交付同时扛,必崩",
"question": "做流量还是交付?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "现在这条赛道缺的是流量,不是交付。和群主合作要分钱,不分必被排挤",
"reason": "一端打穿"
},
{
"title": "分钱分缺口",
"start_time": "00:28:00",
"end_time": "00:33:00",
"hook_3sec": "分他靠自己赚不到的那块",
"question": "招人怎么分钱?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "员工自己只能挣5000到8000你给他1万他多拿2000他才愿意跟你干",
"reason": "分钱逻辑"
},
{
"title": "推流就三板斧",
"start_time": "00:33:00",
"end_time": "00:38:00",
"hook_3sec": "二百七十万推流,密码就几条",
"question": "派对流量怎么来?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "流量密码就那几个职场、搞钱、MBTI性格匹配最容易共鸣",
"reason": "Soul数据复盘"
},
{
"title": "面试三面定人",
"start_time": "00:38:00",
"end_time": "00:43:00",
"hook_3sec": "三百简历,最后只要两三个",
"question": "怎么筛人?",
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "二面线上做题、跟团队开25分钟会看配合。三面定薪资岗位7天试岗",
"reason": "面试流程"
},
{
"title": "实操碾压学历",
"start_time": "00:43:00",
"end_time": "00:48:00",
"hook_3sec": "不学AI连班都难上",
"question": null,
"cta_ending": "今天就到这里,点个关注下次不迷路",
"transcript_excerpt": "想拿2万工资AI月消耗至少1000。这不是卡学历是卡你有没有真在用AI干活",
"reason": "实操门槛"
}
]

View File

@@ -11,12 +11,19 @@ import os
import re
import sys
from pathlib import Path
from typing import Optional
OLLAMA_URL = "http://localhost:11434"
DEFAULT_CTA = "关注我,每天学一招私域干货"
CLIP_COUNT = 15
MIN_DURATION = 60 # 最少 1 分钟
MIN_DURATION = 60 # 最少 1 分钟(长切片默认)
MAX_DURATION = 300 # 最多 5 分钟
# 运营短切片默认:单场产出高密度短视频,便于抖音/热点测试
OPS_SHORT_MIN = 15
OPS_SHORT_MAX = 30
OPS_SHORT_CLIPS = 24
# 飞书/录屏开场常见 ASR 鬼畜循环,运营短切片喂给模型的文字稿从该秒之后开始(约 7:30
OPS_SHORT_PROMPT_MIN_SEC_DEFAULT = 450.0
# API 默认模型:优先用当前可用最佳(可被 OPENAI_MODEL / OPENAI_MODELS 覆盖)
DEFAULT_API_MODEL = "gpt-4o"
@@ -43,18 +50,27 @@ def parse_srt_segments(srt_path: str) -> list:
return segments
def fallback_highlights(transcript_path: str, clip_count: int) -> list:
"""规则备用:每段 60-300 秒1-5 分钟)"""
def fallback_highlights(
transcript_path: str,
clip_count: int,
min_dur: float = 60,
max_dur: float = 300,
start_from_sec: float = 0,
) -> list:
"""规则备用:按 min_durmax_dur 均匀切分;可从 start_from_sec 起切(运营短切片对齐正片起点)。"""
segments = parse_srt_segments(transcript_path)
if not segments:
return []
total = segments[-1]["end_sec"] if segments else 0
seg_dur = min(300, max(60, total / clip_count)) # 每段 1-5 分钟
start_from_sec = max(0, min(float(start_from_sec), max(0, total - min_dur - 2)))
usable = max(0, total - start_from_sec - 2)
target = usable / max(1, clip_count)
seg_dur = min(max_dur, max(min_dur, target))
result = []
for i in range(clip_count):
start_sec = int(i * seg_dur)
end_sec = min(int(start_sec + seg_dur), int(total - 5))
if end_sec <= start_sec + 59: # 不足 1 分钟跳过
start_sec = int(start_from_sec + i * seg_dur)
end_sec = min(int(start_sec + seg_dur), int(total - 2))
if end_sec <= start_sec + max(5, min_dur - 1):
continue
# 找该时间段内的字幕
texts = [s["text"] for s in segments if s["end_sec"] >= start_sec and s["start_sec"] <= end_sec]
@@ -116,6 +132,40 @@ def srt_to_timestamped_text(srt_path: str, skip_repetitive_head: int = 150) -> s
return "\n".join(f"[{s}] {t}" for s, t in out)
def srt_text_from_min_sec(srt_path: str, min_start_sec: float) -> str:
"""只保留字幕开始时间 >= min_start_sec 的行,拼成带时间戳文本(削掉开场噪声再送模型)。"""
segments = parse_srt_segments(srt_path)
if not segments:
return ""
lines = [
f"[{s['start_time']}] {s['text']}"
for s in segments
if s["start_sec"] >= min_start_sec
]
return "\n".join(lines)
def _filter_start_not_before(data: list, min_start_sec: float) -> list:
"""丢弃开始时间早于 min_start_sec 的片段(运营短切片防开场鬼畜)。"""
out = []
for item in data:
if not isinstance(item, dict):
continue
st = item.get("start_time") or item.get("start") or "00:00:00"
if isinstance(st, (int, float)):
sec = float(st)
else:
sec = _parse_time_to_sec(str(st))
if sec >= min_start_sec:
out.append(item)
else:
print(
f" 过滤过早片段: {item.get('title', '?')} (起 {sec:.0f}s < {min_start_sec:.0f}s)",
file=sys.stderr,
)
return out
def _sec_to_hhmmss(sec: float) -> str:
"""秒数转为 HH:MM:SS"""
s = int(sec)
@@ -140,8 +190,8 @@ def _parse_time_to_sec(t: str) -> float:
return 0
def _filter_short_clips(data: list) -> list:
"""过滤掉时长 < 60 秒的切片"""
def _filter_clips_by_duration(data: list, min_sec: float, max_sec: Optional[float]) -> list:
"""按时长过滤max_sec 为 None 时不限制上限"""
result = []
for item in data:
if not isinstance(item, dict):
@@ -149,26 +199,95 @@ def _filter_short_clips(data: list) -> list:
st = item.get("start_time") or item.get("start") or "00:00:00"
et = item.get("end_time") or item.get("end") or "00:01:00"
dur = _parse_time_to_sec(et) - _parse_time_to_sec(st)
if dur >= 60:
ok_min = dur >= min_sec
ok_max = max_sec is None or dur <= max_sec
if ok_min and ok_max:
result.append(item)
else:
print(f" 过滤短片段: {item.get('title','?')} (仅{dur:.0f}秒)", file=sys.stderr)
why = []
if not ok_min:
why.append(f"短于{min_sec:.0f}")
if not ok_max:
why.append(f"长于{max_sec:.0f}")
print(
f" 过滤片段: {item.get('title','?')} ({dur:.0f}秒, {','.join(why)})",
file=sys.stderr,
)
return result
def _build_prompt(transcript: str, clip_count: int) -> str:
def _ops_short_ai_plausible(
data: list,
min_dur: float,
max_dur: float,
min_start_sec: float,
min_count: int = 5,
) -> bool:
"""运营短切片AI 必须给出足够条数,且每条时长与起点符合窗口,否则走规则均匀切。"""
if not data or not isinstance(data, list) or len(data) < min_count:
return False
tol = 1.5
for item in data:
if not isinstance(item, dict):
return False
st = item.get("start_time") or item.get("start") or "00:00:00"
et = item.get("end_time") or item.get("end") or "00:01:00"
if isinstance(st, (int, float)):
st = _sec_to_hhmmss(float(st))
if isinstance(et, (int, float)):
et = _sec_to_hhmmss(float(et))
try:
ssec = _parse_time_to_sec(str(st))
esec = _parse_time_to_sec(str(et))
except Exception:
return False
dur = esec - ssec
if dur < min_dur - tol or dur > max_dur + tol:
return False
if min_start_sec > 0 and ssec < min_start_sec - tol:
return False
return True
def _transcript_for_prompt(transcript: str, clip_count: int, min_dur: float) -> str:
"""长视频多短切片时需要更大上下文,避免高光只落在开头"""
if min_dur < 45 or clip_count > 12:
cap = 120000
else:
cap = 5000
return transcript[:cap] if len(transcript) > cap else transcript
def _build_prompt(
transcript: str,
clip_count: int,
min_dur: float = 60,
max_dur: float = 300,
ops_jingju_hotspot: bool = False,
) -> str:
"""构建高光识别 prompt提问→回答有提问时 question/hook_3sec 用提问问题)"""
txt = transcript[:5000] if len(transcript) > 5000 else transcript
txt = _transcript_for_prompt(transcript, clip_count, min_dur)
dur_rule = f"每段时长必须严格在 {int(min_dur)}{int(max_dur)} 秒之间(看时间戳相减),不要输出低于 {int(min_dur)} 秒或超过 {int(max_dur)} 秒的区间。"
extra = ""
if ops_jingju_hotspot:
extra = """
## 运营短切片选题(优先)
- 优先剪:说话人用**京剧、戏曲、唱腔、行当、锣鼓**等做的比喻或梗(有趣、反差、易传播)。
- 其次:当场**热点词**平台规则、搞钱案例、AI/职场/流量等强刺激观点),一句话能当标题。
- 仍遵守提问→回答:有提问时 question + hook_3sec 一致。
- 标题 **410 个汉字**,要像抖音封面,忌长句。
"""
return f"""识别视频文字稿中的 {clip_count} 个高光片段,直接输出 JSON 数组,第一个字符必须是 [。
重要:每个话题均优先提问→回答。若某片段里有人提问(观众/连麦者问的问题),必须提取提问内容填 question且 hook_3sec 用该提问成片前3秒先展示提问再播回答。
{dur_rule}
{extra}
示例(有提问):
[{{"title":"普通人怎么敢跟ZF搞","start_time":"01:12:30","end_time":"01:15:30","question":"普通人怎么敢跟ZF搞","hook_3sec":"普通人怎么敢跟ZF搞","cta_ending":"{DEFAULT_CTA}","transcript_excerpt":"维权起头跑通就成生意","reason":"提问+回答完整"}}]
示例(无提问):
[{{"title":"起头难","start_time":"00:05:55","end_time":"00:08:00","hook_3sec":"没人起头就起头","cta_ending":"{DEFAULT_CTA}","transcript_excerpt":"起头难跑通就能变成付费服务","reason":"核心观点"}}]
文字稿(从时间戳提取 start_time、end_time,每段 60-300 秒
文字稿(从时间戳提取 start_time、end_time;整场均匀覆盖,不要扎堆在同一分钟
{txt}
直接输出 JSON 数组,以 [ 开头。有提问的片段必须带 question 且 hook_3sec 与 question 一致。"""
@@ -279,18 +398,27 @@ def _build_api_provider_queue() -> list:
return queue
def call_openai_api(transcript: str, clip_count: int, provider: dict) -> str:
def call_openai_api(
transcript: str,
clip_count: int,
provider: dict,
min_dur: float = 60,
max_dur: float = 300,
ops_jingju_hotspot: bool = False,
) -> str:
"""调用 OpenAI 兼容 APIChat Completion使用指定 base_url / api_key / model。"""
try:
from openai import OpenAI
except ImportError:
raise RuntimeError("未安装 openai 库,请执行: pip install openai")
prompt = _build_prompt(transcript, clip_count)
prompt = _build_prompt(
transcript, clip_count, min_dur, max_dur, ops_jingju_hotspot=ops_jingju_hotspot
)
system = (
"你是短视频策划师。用户会提供视频文字稿,你只输出一个 JSON 数组。"
"若某片段内有人提问(观众/连麦者问的问题),必须提取提问原文填 question且 hook_3sec 用该提问前3秒先展示提问再回答无提问则 hook_3sec 用金句/悬念。"
"格式含 title, start_time, end_time, hook_3sec, cta_ending, transcript_excerpt, reason有提问时加 question。"
"禁止输出任何非 JSON 内容。"
"必须严格遵守用户给出的单段时长区间(秒)。禁止输出任何非 JSON 内容。"
)
client = OpenAI(api_key=provider["api_key"], base_url=provider["base_url"])
resp = client.chat.completions.create(
@@ -308,15 +436,24 @@ def call_openai_api(transcript: str, clip_count: int, provider: dict) -> str:
return content
def call_ollama(transcript: str, clip_count: int = CLIP_COUNT, model: str = "qwen2.5:3b") -> str:
def call_ollama(
transcript: str,
clip_count: int = CLIP_COUNT,
model: str = "qwen2.5:3b",
min_dur: float = 60,
max_dur: float = 300,
ops_jingju_hotspot: bool = False,
) -> str:
"""调用卡若AI本地模型Ollama使用 chat 接口避免对话式误判"""
import requests
prompt = _build_prompt(transcript, clip_count)
prompt = _build_prompt(
transcript, clip_count, min_dur, max_dur, ops_jingju_hotspot=ops_jingju_hotspot
)
system = (
"你是短视频策划师。用户会提供视频文字稿,你只输出一个 JSON 数组。"
"若某片段内有人提问(观众/连麦者问的问题),必须提取提问原文填 question且 hook_3sec 用该提问前3秒先展示提问再回答无提问则 hook_3sec 用金句/悬念。"
"格式含 title, start_time, end_time, hook_3sec, cta_ending, transcript_excerpt, reason有提问时加 question。"
"禁止输出任何非 JSON 内容。"
"必须严格遵守用户给出的单段时长区间(秒)。禁止输出任何非 JSON 内容。"
)
try:
r = requests.post(
@@ -348,14 +485,80 @@ def main():
parser = argparse.ArgumentParser(description="高光识别 - AI 分析文字稿输出 highlights.json")
parser.add_argument("--transcript", "-t", required=True, help="transcript.srt 路径")
parser.add_argument("--output", "-o", required=True, help="highlights.json 输出路径")
parser.add_argument("--clips", "-n", type=int, default=CLIP_COUNT, help="切片数量")
parser.add_argument("--clips", "-n", type=int, default=None, help="切片数量(默认随 preset")
parser.add_argument(
"--preset",
choices=["long", "ops-short"],
default="long",
help="long=单场深度切片 60300 秒ops-short=运营短切片 1530 秒×约24条京剧梗+热点优先",
)
parser.add_argument(
"--min-duration",
type=float,
default=None,
help="单段最小时长(秒),默认随 preset",
)
parser.add_argument(
"--max-duration",
type=float,
default=None,
help="单段最大时长(秒),默认随 preset长切片模式可不限制上限则传极大值",
)
parser.add_argument(
"--ops-jingju-hotspot",
action="store_true",
help="在 prompt 中强调京剧比喻/唱腔梗 + 热点选题(可与 ops-short 同用)",
)
parser.add_argument(
"--prompt-min-sec",
type=float,
default=None,
help="送模型的 SRT 从该秒之后截取ops-short 默认 450约7:30long 默认 0",
)
parser.add_argument("--require-ai", action="store_true", help="必须用 AI 识别,失败则退出不兜底")
args = parser.parse_args()
if args.preset == "ops-short":
min_dur = float(args.min_duration if args.min_duration is not None else OPS_SHORT_MIN)
max_dur = float(args.max_duration if args.max_duration is not None else OPS_SHORT_MAX)
clip_n = int(args.clips if args.clips is not None else OPS_SHORT_CLIPS)
ops_focus = True # 运营短切片默认强调京剧梗+热点
filter_max_sec = max_dur # 严格卡上限
else:
min_dur = float(args.min_duration if args.min_duration is not None else MIN_DURATION)
max_dur = float(args.max_duration if args.max_duration is not None else MAX_DURATION)
clip_n = int(args.clips if args.clips is not None else CLIP_COUNT)
ops_focus = bool(args.ops_jingju_hotspot)
filter_max_sec = None # 与历史一致:只滤掉过短,不因略超 300 秒丢片
prompt_min_sec = (
float(args.prompt_min_sec)
if args.prompt_min_sec is not None
else (
OPS_SHORT_PROMPT_MIN_SEC_DEFAULT
if args.preset == "ops-short"
else 0.0
)
)
transcript_path = Path(args.transcript)
if not transcript_path.exists():
print(f"❌ 文字稿不存在: {transcript_path}", file=sys.stderr)
sys.exit(1)
text = srt_to_timestamped_text(str(transcript_path))
if prompt_min_sec > 0:
text = srt_text_from_min_sec(str(transcript_path), prompt_min_sec)
if len(text) < 400:
print(
"⚠️ 截断后文字稿过短,回退 srt_to_timestamped_text 全长",
file=sys.stderr,
)
text = srt_to_timestamped_text(str(transcript_path))
else:
print(
f" 运营短切片:送模型文字稿已从 {prompt_min_sec:.0f}s 之后截取(约 {len(text)} 字)",
flush=True,
)
else:
text = srt_to_timestamped_text(str(transcript_path))
fb_start = float(prompt_min_sec) if args.preset == "ops-short" else 0.0
if len(text) < 100:
print("❌ 文字稿过短,请检查 SRT 格式", file=sys.stderr)
sys.exit(1)
@@ -366,13 +569,29 @@ def main():
for provider in api_queue:
try:
print(f"正在调用 API {provider.get('model', '?')} 分析高光片段...")
raw = call_openai_api(text, args.clips, provider)
raw = call_openai_api(
text,
clip_n,
provider,
min_dur=min_dur,
max_dur=max_dur,
ops_jingju_hotspot=ops_focus,
)
if not raw:
raise ValueError("API 返回空")
data = _parse_ai_json(raw)
if data and isinstance(data, list) and len(data) > 0:
print(f" ✓ API ({provider.get('model', '?')}) 成功,识别 {len(data)}")
break
if args.preset == "ops-short" and not _ops_short_ai_plausible(
data, min_dur, max_dur, prompt_min_sec
):
print(
" API 结果不符合运营短切片规则,丢弃并尝试下一通道",
file=sys.stderr,
)
data = None
else:
print(f" ✓ API ({provider.get('model', '?')}) 成功,识别 {len(data)}")
break
except Exception as e:
print(f" API ({provider.get('model', '?')}) 失败: {e}", file=sys.stderr)
if raw:
@@ -388,13 +607,29 @@ def main():
for model in OLLAMA_MODELS:
try:
print(f"正在调用 Ollama {model} 分析高光片段...")
raw = call_ollama(text, args.clips, model)
raw = call_ollama(
text,
clip_n,
model,
min_dur=min_dur,
max_dur=max_dur,
ops_jingju_hotspot=ops_focus,
)
if not raw:
raise ValueError("模型返回空")
data = _parse_ai_json(raw)
if data and isinstance(data, list) and len(data) > 0:
print(f"{model} 成功,识别 {len(data)}")
break
if args.preset == "ops-short" and not _ops_short_ai_plausible(
data, min_dur, max_dur, prompt_min_sec
):
print(
f" {model} 结果不符合运营短切片规则,尝试下一模型",
file=sys.stderr,
)
data = None
else:
print(f"{model} 成功,识别 {len(data)}")
break
except Exception as e:
print(f" {model} 失败: {e}", file=sys.stderr)
if raw:
@@ -405,14 +640,16 @@ def main():
print("❌ 必须用 AI 识别,当前无可用模型或解析失败", file=sys.stderr)
sys.exit(1)
print("使用规则备用切分", file=sys.stderr)
data = fallback_highlights(str(transcript_path), args.clips)
data = fallback_highlights(
str(transcript_path), clip_n, min_dur, max_dur, start_from_sec=fb_start
)
if not data:
data = fallback_highlights(str(transcript_path), args.clips)
data = fallback_highlights(
str(transcript_path), clip_n, min_dur, max_dur, start_from_sec=fb_start
)
if not isinstance(data, list):
data = [data]
# 过滤短于 1 分钟的切片
data = _filter_short_clips(data)
# 统一 start_time/end_time 为 HH:MM:SS兼容 Ollama 返回秒数)
# 先统一时间为 HH:MM:SS再按时长过滤兼容模型返回数值秒
for item in data:
if not isinstance(item, dict):
continue
@@ -422,10 +659,15 @@ def main():
et = item.get("end_time") or item.get("end")
if isinstance(et, (int, float)):
item["end_time"] = _sec_to_hhmmss(et)
if args.preset == "ops-short" and prompt_min_sec > 0:
data = _filter_start_not_before(data, prompt_min_sec)
data = _filter_clips_by_duration(data, min_dur, filter_max_sec)
# 若 AI 返回的片段全被过滤,用规则备用
if not data and transcript_path.exists():
print(" AI 片段时长无效,改用规则切分1-5 分钟)", file=sys.stderr)
data = fallback_highlights(str(transcript_path), args.clips)
print(" AI 片段时长无效,改用规则切分", file=sys.stderr)
data = fallback_highlights(
str(transcript_path), clip_n, min_dur, max_dur, start_from_sec=fb_start
)
# 强制中文
print(" 确保导出名与封面为简体中文...")
data = _ensure_chinese_highlights(data)

View File

@@ -64,6 +64,12 @@ CROP_VF = "crop=568:1080:508:0,crop=498:1080:35:0"
VERTICAL_W, VERTICAL_H = 498, 1080
OVERLAY_X = 543 # 508+35与历史 483+60 对齐,避免封面/字幕错位
# 竖屏「全画面入画」:不裁中间竖条;整幅横版等比缩放入 498×1080上下黑边letterbox
VERTICAL_FIT_FULL_VF = (
"scale=w=498:h=1080:force_original_aspect_ratio=decrease:flags=lanczos,"
"pad=498:1080:(ow-iw)/2:(oh-ih)/2:color=black"
)
def _overlay_x_from_crop_vf(crop_vf: str):
"""从两段 crop 解析字幕/封面叠在横版上的 xcrop=W:1080:X:0,crop=498:1080:Y:0 → X+Y"""
@@ -81,13 +87,14 @@ def build_typewriter_subtitle_images(
temp_dir,
out_w,
out_h,
cover_duration,
subtitle_overlay_start,
min_step_sec=0.05,
max_steps_per_line=28,
):
"""
将每条字幕拆成多帧:同一时间段内前缀逐字(逐段)变长,读起来更顺、更像跟读语音。
长句按步数上限均分字符,避免单条 concat 段过多。
subtitle_overlay_start最早显示字幕的时间轴须 ≥ 封面结束 + 留白。
"""
sub_images = []
img_idx = 0
@@ -96,7 +103,7 @@ def build_typewriter_subtitle_images(
if not safe_text or not safe_text.strip():
continue
s, e = float(sub["start"]), float(sub["end"])
s = max(s, cover_duration)
s = max(s, subtitle_overlay_start)
if s >= e - 0.02:
continue
dur = e - s
@@ -299,6 +306,11 @@ STYLE = {
}
}
# 字幕与语音同步的全局延迟补偿(秒);封面后留白再叠字幕;封面标题汉字上限(须在本文件先于 _limit_cover_title_cjk 定义)
SUBTITLE_DELAY_SEC = 2.0
SUBS_START_AFTER_COVER_SEC = 3.0
COVER_TITLE_MAX_CJK = 6
# ============ 工具函数 ============
def get_font(font_path, size):
@@ -339,6 +351,21 @@ def _normalize_title_for_display(title: str) -> str:
return s
def _limit_cover_title_cjk(text: str, max_cjk: int = COVER_TITLE_MAX_CJK) -> str:
    """Keep at most ``max_cjk`` CJK (Han) characters in a cover title.

    Counts only characters in the U+4E00..U+9FFF block; non-CJK characters
    before the cutoff are kept as-is. Truncating overly long titles avoids
    tiny cover fonts and excessive line wrapping. Returns "" for falsy input
    or a non-positive limit (original text is returned unchanged in the
    latter case when it is truthy).
    """
    if not text or max_cjk <= 0:
        return text or ""
    cjk_seen = 0
    cut = len(text)  # default: no truncation needed
    for idx, ch in enumerate(text):
        if "\u4e00" <= ch <= "\u9fff":
            cjk_seen += 1
            if cjk_seen > max_cjk:
                cut = idx  # drop this char and everything after it
                break
    return text[:cut].strip()
# macOS/APFS 文件名允许的中文标点(保留刺激性标题所需的标点)
_SAFE_CJK_PUNCT = set(",。?!;:·、…()【】「」《》~—·+")
@@ -449,14 +476,6 @@ def _detect_clip_pts_offset(clip_path: str) -> float:
return 0.0
# 字幕与语音同步的全局延迟补偿(秒)
# batch_clip -ss input seeking 导致实际切割比请求早 0~3 秒(关键帧对齐)
# 字幕按 highlights.start_time 算相对时间,会比实际音频提前
# 加正值延迟 = 字幕往后推 = 与声音更同步
# 2025-03 实测Soul派对直播视频关键帧间距 2-4 秒,补偿需约 2.0s
SUBTITLE_DELAY_SEC = 2.0 # 增大到 2.0,避免字幕超前于说话
def _is_noise_line(text: str) -> bool:
"""检测是否为噪声行单字母、重复符号、ASR幻觉等"""
if not text:
@@ -625,9 +644,9 @@ def _sec_to_srt_time(sec):
return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"
def write_clip_srt(srt_path, subtitles, cover_duration):
"""写出用于烧录的 SRT仅保留封面结束后的字幕时间已相对片段"""
safe_start = cover_duration + 0.3
def write_clip_srt(srt_path, subtitles, cover_duration, subs_after_cover_sec=SUBS_START_AFTER_COVER_SEC):
"""写出用于烧录的 SRT仅保留封面结束+留白后的字幕,时间已相对片段)"""
safe_start = cover_duration + subs_after_cover_sec + 0.3
lines = []
idx = 1
for sub in subtitles:
@@ -726,8 +745,11 @@ def get_video_info(video_path):
'-of', 'json', video_path
]
result = subprocess.run(cmd, capture_output=True, text=True)
info = json.loads(result.stdout)
stream = info['streams'][0]
info = json.loads(result.stdout or "{}")
streams = info.get("streams") or []
if not streams:
raise ValueError(f"ffprobe 无视频流: {video_path}")
stream = streams[0]
# 获取时长
cmd2 = [
@@ -909,31 +931,39 @@ def create_cover_image(hook_text, width, height, output_path, video_path=None):
# ============ 字幕图片生成 ============
def create_subtitle_image(text, width, height, output_path):
"""创建字幕图片(关键词加粗加大突出)。竖屏 498 宽时字号略小、保证居中且不溢出"""
"""创建字幕图片关键词加粗加大突出。498 竖条时居中;全幅横版时偏下居中(为 --vertical-fit-full"""
style = STYLE['subtitle']
img = Image.new('RGBA', (width, height), (0, 0, 0, 0))
draw = ImageDraw.Draw(img)
# 竖屏窄幅时缩小字号,保证整行在画面内且居中
base_size = style['font_size']
if (width, height) == (VERTICAL_W, VERTICAL_H):
base_size = min(base_size, 38)
elif height == 1080 and width >= 1280:
# 1920×1080 全画面叠字:字号略大,条带靠下,避免挡脸
base_size = min(max(base_size, 46), 56)
font = get_font(FONT_BOLD, base_size)
text_w, text_h = get_text_size(draw, text, font)
while text_w > width - 80 and base_size > 24:
margin_x = 120 if width >= 1280 else 80
while text_w > width - margin_x and base_size > 24:
base_size -= 2
font = get_font(FONT_BOLD, base_size)
text_w, text_h = get_text_size(draw, text, font)
kw_size = base_size + style.get('keyword_size_add', 4)
kw_font = get_font(FONT_HEAVY, kw_size)
# 字幕完全居中(水平+垂直正中间);竖屏时限制在界面内不超出
base_x = (width - text_w) // 2
if (width, height) == (VERTICAL_W, VERTICAL_H):
pad = 24
base_x = max(pad, min(width - pad - text_w, base_x))
base_y = (height - text_h) // 2
base_y = (height - text_h) // 2
elif height == 1080 and width >= 1280:
pad = 40
base_x = max(pad, min(width - pad - text_w, base_x))
base_y = height - text_h - 100
else:
base_y = (height - text_h) // 2
# 背景条(不超出画布)
padding = 15
@@ -1085,10 +1115,11 @@ def _parse_clip_index(filename: str) -> int:
def enhance_clip(clip_path, output_path, highlight_info, temp_dir, transcript_path,
force_burn_subs=False, skip_subs=False, vertical=False,
crop_vf=None, overlay_x=None, typewriter_subs=False):
"""增强单个切片。vertical=True 时最后裁成竖屏 498x1080 直出成片。
crop_vf / overlay_x场次取景微调先截 20% 帧对一下小程序黑框再填)
typewriter_subs同一条字幕时间内前缀逐字渐显更跟口型
crop_vf=None, overlay_x=None, typewriter_subs=False,
vertical_fit_full=False):
"""增强单个切片。vertical=True 时最后输出 498×1080
vertical_fit_full不裁中间竖条整幅画面等比缩放入 498×1080 + 上下黑边,前后内容都可见
否则沿用 crop 竖条(全画面标定深色带)。
"""
print(f" 输入: {os.path.basename(clip_path)}", flush=True)
@@ -1108,17 +1139,28 @@ def enhance_clip(clip_path, output_path, highlight_info, temp_dir, transcript_pa
hook_text = _normalize_title_for_display(raw_title) or raw_title or '精彩切片'
# 封面文字同样做安全处理
hook_text = apply_platform_safety(hook_text)
hook_text = _limit_cover_title_cjk(hook_text, COVER_TITLE_MAX_CJK) or hook_text
cover_duration = STYLE['cover']['duration']
subtitle_overlay_start = cover_duration + SUBS_START_AFTER_COVER_SEC
# 竖屏成片:封面/字幕按 498x1080 做,叠在裁切区域,文字与字幕在竖屏上完整且居中
out_w, out_h = (VERTICAL_W, VERTICAL_H) if vertical else (width, height)
vf_use = (crop_vf or CROP_VF).strip() if vertical else CROP_VF
ox = overlay_x
if vertical and ox is None and crop_vf:
ox = _overlay_x_from_crop_vf(crop_vf)
if vertical and ox is None:
ox = OVERLAY_X
overlay_pos = f"{int(ox)}:0" if vertical else "0:0"
# 竖屏:默认封面/字幕按 498×1080 叠在竖条上;全画面模式按原分辨率全屏叠加再整体缩放
if vertical and vertical_fit_full:
out_w, out_h = width, height
vf_use = ""
overlay_pos = "0:0"
elif vertical:
out_w, out_h = VERTICAL_W, VERTICAL_H
vf_use = (crop_vf or CROP_VF).strip()
ox = overlay_x
if ox is None and crop_vf:
ox = _overlay_x_from_crop_vf(crop_vf)
if ox is None:
ox = OVERLAY_X
overlay_pos = f"{int(ox)}:0"
else:
out_w, out_h = width, height
vf_use = CROP_VF
overlay_pos = "0:0"
# 1. 生成封面
print(f" [1/5] 封面生成中…", flush=True)
@@ -1198,7 +1240,7 @@ def enhance_clip(clip_path, output_path, highlight_info, temp_dir, transcript_pa
print(f" ✓ 字幕解析 ({len(subtitles)}条),将烧录为{mode}字幕", flush=True)
if typewriter_subs:
sub_images = build_typewriter_subtitle_images(
subtitles, temp_dir, out_w, out_h, cover_duration
subtitles, temp_dir, out_w, out_h, subtitle_overlay_start
)
else:
for i, sub in enumerate(subtitles):
@@ -1254,10 +1296,10 @@ def enhance_clip(clip_path, output_path, highlight_info, temp_dir, transcript_pa
# duration X.XXX
# 最后一行不写 duration用于循环/截断防报错)
concat_lines = []
prev_end = cover_duration # 字幕从封面结束后开始
prev_end = subtitle_overlay_start # 字幕从封面结束 + SUBS_START_AFTER_COVER_SEC」起
for img in sub_images:
sub_start = max(img['start'], cover_duration)
sub_start = max(img['start'], subtitle_overlay_start)
sub_end = img['end']
if sub_start >= sub_end:
continue
@@ -1327,15 +1369,26 @@ def enhance_clip(clip_path, output_path, highlight_info, temp_dir, transcript_pa
if result.stderr:
print(f" {str(result.stderr)[:300]}", file=sys.stderr)
# 5.4 输出:竖屏则裁成 498x1080 直出(高光区域裁剪,成片必做
print(f" [5/5] 竖屏裁剪中498×1080", flush=True)
if vertical:
# 5.4 输出:竖屏 498×1080(竖条裁剪 或 全画面 letterbox
print(f" [5/5] 竖屏输出498×1080", flush=True)
if vertical and vertical_fit_full:
r = subprocess.run([
'ffmpeg', '-y', '-i', current_video,
'-vf', VERTICAL_FIT_FULL_VF, '-c:a', 'copy', output_path
], capture_output=True, text=True)
if r.returncode == 0 and os.path.exists(output_path):
print(f" ✓ 全画面缩放+上下黑边完成(未裁中间竖条)", flush=True)
else:
print(f" ❌ 全画面缩放失败: {(r.stderr or '')[:400]}", file=sys.stderr)
shutil.copy(current_video, output_path)
print(f" ⚠ 已回退为未缩放版本", flush=True)
elif vertical:
r = subprocess.run([
'ffmpeg', '-y', '-i', current_video,
'-vf', vf_use, '-c:a', 'copy', output_path
], capture_output=True, text=True)
if r.returncode == 0 and os.path.exists(output_path):
print(f" ✓ 竖屏裁剪完成", flush=True)
print(f" ✓ 竖屏竖条裁剪完成", flush=True)
else:
print(f" ❌ 竖屏裁剪失败: {(r.stderr or '')[:300]}", file=sys.stderr)
shutil.copy(current_video, output_path)
@@ -1378,6 +1431,11 @@ def main():
action="store_true",
help="字幕在同一条时间内前缀逐字渐显(更通顺、更跟读)",
)
parser.add_argument(
"--vertical-fit-full",
action="store_true",
help="竖屏成片不裁中间竖条:整幅 16:9 等比缩放入 498×1080上下黑边画面显示全封面/字幕先叠满横版再缩放",
)
args = parser.parse_args()
clips_dir = Path(args.clips) if args.clips else CLIPS_DIR
@@ -1404,12 +1462,14 @@ def main():
overlay_x_arg = getattr(args, "overlay_x", -1)
overlay_x_arg = None if overlay_x_arg < 0 else overlay_x_arg
typewriter = getattr(args, "typewriter_subs", False)
vfit = getattr(args, "vertical_fit_full", False)
print(
f"功能: 封面+字幕+加速10%+去语气词"
+ ("+竖屏498x1080" if vertical else "")
+ ("+全画面letterbox(不裁竖条)" if vertical and vfit else "")
+ ("+逐字字幕" if typewriter else "")
)
if vertical and crop_vf_arg:
if vertical and crop_vf_arg and not vfit:
print(f"取景: --crop-vf {crop_vf_arg}")
print(f"输入: {clips_dir}")
print(f"输出: {output_dir}" + ("(成片,文件名=标题)" if title_only else ""))
@@ -1439,6 +1499,9 @@ def main():
if getattr(args, 'title_only', False):
title = (highlight_info.get('title') or highlight_info.get('hook_3sec') or clip_path.stem)
title = _limit_cover_title_cjk(
_normalize_title_for_display(str(title)) or str(title), COVER_TITLE_MAX_CJK
) or str(title)
name = sanitize_filename(title) + '.mp4'
output_path = output_dir / name
else:
@@ -1458,6 +1521,7 @@ def main():
crop_vf=crop_vf_arg or None,
overlay_x=overlay_x_arg,
typewriter_subs=typewriter,
vertical_fit_full=vfit,
):
success_count += 1
finally:

View File

@@ -105,7 +105,31 @@ def main():
parser = argparse.ArgumentParser(description="Soul 切片一体化流水线")
parser.add_argument("--video", "-v", required=True, help="输入视频路径")
parser.add_argument("--output", "-o", help="输出目录(默认:视频同目录下 视频名_output")
parser.add_argument("--clips", "-n", type=int, default=8, help="切片数量")
parser.add_argument("--clips", "-n", type=int, default=8, help="切片数量--ops-short 且未改 -n 时默认 24")
parser.add_argument(
"--highlight-preset",
choices=["long", "ops-short"],
default="long",
help="高光识别 presetlong=60300 秒深度切片ops-short=1530 秒运营密度切片",
)
parser.add_argument(
"--ops-short",
action="store_true",
help="运营短切片一键preset=ops-short未指定 -n 时用 24 条",
)
parser.add_argument("--min-clip-sec", type=float, default=None, help="传给 identify_highlights --min-duration")
parser.add_argument("--max-clip-sec", type=float, default=None, help="传给 identify_highlights --max-duration")
parser.add_argument(
"--ops-jingju-hotspot",
action="store_true",
help="高光 prompt 强调京剧梗+热点(可与 ops-short 同用)",
)
parser.add_argument(
"--prompt-min-sec",
type=float,
default=None,
help="传给 identify_highlights送模型的 SRT 从该秒之后截取ops-short 默认 450",
)
parser.add_argument("--skip-transcribe", action="store_true", help="跳过转录(已有 transcript.srt")
parser.add_argument("--skip-highlights", action="store_true", help="跳过高光识别(已有 highlights.json")
parser.add_argument("--skip-clips", action="store_true", help="跳过切片(已有 clips/,仅重新增强)")
@@ -122,6 +146,11 @@ def main():
parser.add_argument("--montage-seconds", type=float, default=4.0, help="快速混剪每条截取秒数")
args = parser.parse_args()
if getattr(args, "ops_short", False):
args.highlight_preset = "ops-short"
if args.clips == 8:
args.clips = 24
video_path = Path(args.video).resolve()
if not video_path.exists():
print(f"❌ 视频不存在: {video_path}")
@@ -148,7 +177,7 @@ def main():
print("=" * 60)
print(f"输入视频: {video_path}")
print(f"输出目录: {base_dir}")
print(f"切片数量: {args.clips}")
print(f"切片数量: {args.clips} 高光 preset: {args.highlight_preset}")
print("=" * 60)
# 0. 强制重转录时删除旧产物(含 audio 以重提完整音频)
@@ -201,16 +230,26 @@ def main():
# 2. 高光识别
if not args.skip_highlights:
hl_cmd = [
sys.executable,
str(SCRIPT_DIR / "identify_highlights.py"),
"--transcript", str(transcript_path),
"--output", str(highlights_path),
"--clips", str(args.clips),
"--preset", str(args.highlight_preset),
]
if args.min_clip_sec is not None:
hl_cmd.extend(["--min-duration", str(args.min_clip_sec)])
if args.max_clip_sec is not None:
hl_cmd.extend(["--max-duration", str(args.max_clip_sec)])
if getattr(args, "ops_jingju_hotspot", False):
hl_cmd.append("--ops-jingju-hotspot")
if getattr(args, "prompt_min_sec", None) is not None:
hl_cmd.extend(["--prompt-min-sec", str(args.prompt_min_sec)])
run(
[
sys.executable,
str(SCRIPT_DIR / "identify_highlights.py"),
"--transcript", str(transcript_path),
"--output", str(highlights_path),
"--clips", str(args.clips),
],
"高光识别Ollama→规则",
timeout=180,
hl_cmd,
"高光识别API→Ollama→规则",
timeout=600,
)
if not highlights_path.exists():
print(f"❌ 需要 highlights.json: {highlights_path}")
@@ -283,7 +322,11 @@ def main():
]
if getattr(args, "skip_subs", False):
enhance_cmd.append("--skip-subs")
if getattr(args, "force_burn_subs", False):
if use_two_folders:
enhance_cmd.extend(["--vertical", "--title-only"])
if (getattr(args, "force_burn_subs", False) or use_two_folders) and not getattr(
args, "skip_subs", False
):
enhance_cmd.append("--force-burn-subs")
enhance_timeout = max(900, 600 + len(clips_list) * 90) # 约 90 秒/片
ok = run(enhance_cmd, "增强处理(封面+字幕+加速)", timeout=enhance_timeout, check=False)

View File

@@ -104,7 +104,7 @@
| # | 技能 | 成员 | 触发词 | SKILL 路径 | 一句话 |
|:--|:---|:---|:---|:---|:---|
| M01 | 视频切片 | 木叶 | **视频剪辑、切片发布、切片动效包装、程序化包装、片头片尾、批量封面、视频包装** | `03_卡木/木叶_视频内容/视频切片/SKILL.md` | 长视频切片+字幕+发布;联动切片动效包装(片头/片尾/程序化) |
| M01 | 视频切片 | 木叶 | **视频剪辑、切片发布、切片动效包装、程序化包装、片头片尾、批量封面、视频包装、运营短切片、15秒切片、热点密度、京剧梗** | `03_卡木/木叶_视频内容/视频切片/SKILL.md` | 长视频切片+字幕+发布;**ops-short 15–30 秒×20–30 条**;联动切片动效包装(片头/片尾/程序化) |
| M01b | 抖音视频解析 | 木叶 | **抖音视频、抖音链接、抖音解析、抖音下载、提取抖音文案、抖音无水印** | `03_卡木/木叶_视频内容/抖音视频解析/SKILL.md` | 链接→解析ID→提取文案→下载无水印视频 |
| M01c | 抖音发布 | 木叶 | **抖音发布、发布到抖音、抖音登录、抖音上传、腕推抖音** | `03_卡木/木叶_视频内容/抖音发布/SKILL.md` | 纯 API 视频上传+发布VOD + bd-ticket-guard无需浏览器 |
| M01d | B站发布 | 木叶 | **B站发布、发布到B站、B站登录、B站上传、bilibili发布** | `03_卡木/木叶_视频内容/B站发布/SKILL.md` | 纯 APIpreupload 分片Cookie 有效期约6个月 |

View File

@@ -403,3 +403,4 @@
| 2026-03-20 11:27:30 | 🔄 卡若AI 同步 2026-03-20 11:27 | 更新:水溪整理归档、运营中枢工作台 | 排除 >20MB: 11 个 |
| 2026-03-20 12:22:19 | 🔄 卡若AI 同步 2026-03-20 12:22 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 |
| 2026-03-20 13:39:57 | 🔄 卡若AI 同步 2026-03-20 12:40 | 更新:水桥平台对接 | 排除 >20MB: 11 个 |
| 2026-03-20 16:08:50 | 🔄 卡若AI 同步 2026-03-20 16:08 | 更新:水桥平台对接、卡木、运营中枢工作台 | 排除 >20MB: 11 个 |

View File

@@ -0,0 +1,133 @@
#!/usr/bin/env python3
"""
S2 私域管理后台按路由批量全页截图Playwright
用法(需已登录态,任选其一):
1) 先在本机 Chrome 登录后台,再用「用户数据目录」启动浏览器:
python3 s2_admin_fullpage_capture.py --user-data-dir "$HOME/Library/Application Support/Google/Chrome" --channel chromium --headless
(无 Google Chrome.app 时用 channel chromium有则用 --channel chrome。建议先关 Chrome 避免配置锁。)
2) 或使用已导出的 storage_state.json
playwright install chromium
python3 s2_admin_fullpage_capture.py --storage-state /path/to/state.json
默认输出卡若Ai 报告目录下 screenshots/fullpage_playwright/
"""
from __future__ import annotations
import argparse
import asyncio
import re
from pathlib import Path
import httpx
BASE = "https://s2.siyuguanli.com/admin/static/js/app.6dd8bb884b28919ef0f9.js"
ADMIN = "https://s2.siyuguanli.com/admin/#"
DEST_DEFAULT = Path(
"/Users/karuo/Documents/卡若Ai的文件夹/报告/S2私域管理后台_功能与接口调研/screenshots/fullpage_playwright"
)
def parse_routes_from_app_js(text: str) -> list[str]:
    """Extract vue-router hash routes from the admin webpack bundle source.

    Scans for ``path:"..."`` occurrences. A path starting with "/" opens a
    new parent scope; bare child paths are joined under the current parent.
    Auth/error/catch-all routes are dropped, duplicates removed while
    preserving first-seen order, and the "/home" and "/datav/overview"
    entries are guaranteed to be present in the result.
    """
    skipped = ("*", "/401", "/404", "/login", "/authredirect", "/")
    parent: list[str] = []
    collected: list[str] = []
    for raw in re.findall(r'path:"([^"]+)"', text):
        if raw in skipped:
            continue
        if raw.startswith("/"):
            parent = [raw.rstrip("/")]
        else:
            if not parent:
                parent = [""]
            joined = "/" + "/".join([seg for seg in parent if seg] + [raw])
            collected.append(re.sub(r"/+", "/", joined))
    # De-duplicate while preserving first-seen order.
    ordered = list(dict.fromkeys(collected))
    ordered = ["/home" if r == "/home/home" else r for r in ordered]
    if "/home" not in ordered:
        ordered.insert(0, "/home")
    # 线上大屏常见 hash 为 #/datav/overview与 bundle 内 /dataReport/datav/overview 并存
    if "/datav/overview" not in ordered:
        ordered.append("/datav/overview")
    return ordered
async def main() -> None:
    """Capture a full-page screenshot of every admin route.

    Flow: download the webpack app bundle, derive the vue-router route list
    from it, then visit each hash route with Playwright and save a full-page
    PNG into ``--dest``. A valid login state must be supplied either via a
    Chrome ``--user-data-dir`` (persistent context) or a Playwright
    ``--storage-state`` JSON.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--dest", type=Path, default=DEST_DEFAULT)
    ap.add_argument("--storage-state", type=Path, default=None)
    ap.add_argument("--user-data-dir", type=Path, default=None)
    ap.add_argument("--channel", default="chrome", help="与 user-data-dir 联用chrome | chromium")
    ap.add_argument("--wait-ms", type=int, default=2500)
    ap.add_argument("--max", type=int, default=0, help="仅截取前 N 条路由0 表示全量")
    ap.add_argument(
        "--headless",
        action="store_true",
        help="与 --user-data-dir 联用:无界面跑持久化上下文(需本机已登录该 Profile",
    )
    ap.add_argument(
        "--no-escape",
        action="store_true",
        help="不在每页加载后按 Escape默认按一次以尝试关闭 v-modal 遮罩)",
    )
    args = ap.parse_args()

    # Imported lazily so argument parsing / --help works without playwright.
    from playwright.async_api import async_playwright

    # Fetch the bundle once and derive the route list from its source.
    async with httpx.AsyncClient(verify=False, timeout=60) as client:
        js = (await client.get(BASE)).text
    routes = parse_routes_from_app_js(js)
    if args.max and args.max > 0:
        routes = routes[: args.max]
    args.dest.mkdir(parents=True, exist_ok=True)

    async with async_playwright() as p:
        if args.user_data_dir:
            # Persistent context reuses the local browser profile's login state.
            browser = await p.chromium.launch_persistent_context(
                user_data_dir=str(args.user_data_dir),
                channel=args.channel if args.channel in ("chrome", "msedge") else None,
                headless=args.headless,
                viewport={"width": 1440, "height": 900},
            )
            page = browser.pages[0] if browser.pages else await browser.new_page()
        else:
            browser = await p.chromium.launch(headless=True)
            context = await browser.new_context(
                viewport={"width": 1440, "height": 900},
                storage_state=str(args.storage_state) if args.storage_state else None,
            )
            page = await context.new_page()

        for i, route in enumerate(routes, 1):
            url = f"{ADMIN}{route}"
            name = route.strip("/").replace("/", "__") or "home"
            fp = args.dest / f"{i:03d}_{name}.png"
            try:
                await page.goto(url, wait_until="networkidle", timeout=120000)
            except Exception:
                # SPA pages may never reach network-idle; settle for DOM ready.
                await page.goto(url, wait_until="domcontentloaded", timeout=120000)
            await page.wait_for_timeout(args.wait_ms)
            if not args.no_escape:
                # Try to dismiss any lingering element-ui v-modal overlay.
                await page.keyboard.press("Escape")
                await page.wait_for_timeout(300)
            await page.screenshot(path=str(fp), full_page=True)
            print(f"OK {i}/{len(routes)} {fp.name}")

        # Fix: the original if/else had two identical branches. Both the
        # persistent context and the plain browser expose .close(); closing
        # the browser also tears down any contexts it owns.
        await browser.close()
if __name__ == "__main__":
asyncio.run(main())

View File

@@ -406,3 +406,4 @@
| 2026-03-20 11:27:30 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-20 11:27 | 更新:水溪整理归档、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
| 2026-03-20 12:22:19 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-20 12:22 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
| 2026-03-20 13:39:57 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-20 12:40 | 更新:水桥平台对接 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
| 2026-03-20 16:08:50 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-20 16:08 | 更新:水桥平台对接、卡木、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |