🔄 卡若AI 同步 2026-03-12 12:20 | 更新:水桥平台对接、卡木、火炬、运营中枢参考资料、运营中枢工作台 | 排除 >20MB: 11 个
This commit is contained in:
312
02_卡人(水)/水桥_平台对接/智能纪要/脚本/find_oldest_long_video_minute.py
Normal file
312
02_卡人(水)/水桥_平台对接/智能纪要/脚本/find_oldest_long_video_minute.py
Normal file
@@ -0,0 +1,312 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
飞书妙记:查找「最早 + 时长≥1小时 + 有画面」的一条。
|
||||
|
||||
用法:
|
||||
python3 find_oldest_long_video_minute.py
|
||||
python3 find_oldest_long_video_minute.py --list-only # 只拉列表并保存,不查 status
|
||||
python3 find_oldest_long_video_minute.py --max-status 100 # 只对最早 100 条查 status(默认 300)
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
import requests
|
||||
except ImportError:
|
||||
requests = None
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent
|
||||
COOKIE_FILE = SCRIPT_DIR / "cookie_minutes.txt"
|
||||
TOKEN_LIST_FILE = Path("/tmp/feishu_all_minutes_tokens.json")
|
||||
FULL_LIST_FILE = Path("/tmp/feishu_all_minutes_full.json") # 含 duration(list API 返回,单位毫秒)
|
||||
STATUS_URL = "https://cunkebao.feishu.cn/minutes/api/status"
|
||||
REFERER = "https://cunkebao.feishu.cn/minutes/"
|
||||
MIN_DURATION_MS = 3600 * 1000 # 1 小时,毫秒
|
||||
|
||||
|
||||
def _cookie_from_cursor_browser() -> str:
|
||||
try:
|
||||
import sqlite3
|
||||
import shutil
|
||||
import tempfile
|
||||
cookie_path = Path.home() / "Library/Application Support/Cursor/Partitions/cursor-browser/Cookies"
|
||||
if not cookie_path.exists():
|
||||
return ""
|
||||
tmp = tempfile.mktemp(suffix=".db")
|
||||
shutil.copy2(cookie_path, tmp)
|
||||
conn = sqlite3.connect(tmp)
|
||||
cur = conn.cursor()
|
||||
cur.execute(
|
||||
"SELECT name, value FROM cookies WHERE (host_key LIKE '%feishu%' OR host_key LIKE '%cunkebao%') AND value != ''"
|
||||
)
|
||||
rows = cur.fetchall()
|
||||
conn.close()
|
||||
Path(tmp).unlink(missing_ok=True)
|
||||
if rows:
|
||||
s = "; ".join([f"{name}={value}" for name, value in rows])
|
||||
if len(s) > 100:
|
||||
return s
|
||||
except Exception:
|
||||
pass
|
||||
return ""
|
||||
|
||||
|
||||
def get_cookie() -> str:
    """Resolve the Feishu Minutes session cookie.

    Priority: the FEISHU_MINUTES_COOKIE env var, then the first usable line
    of cookie_minutes.txt, then the Cursor embedded-browser store.
    Placeholder values (containing "PASTE_YOUR") are ignored.
    """
    env_cookie = os.environ.get("FEISHU_MINUTES_COOKIE", "").strip()
    if env_cookie and len(env_cookie) > 100 and "PASTE_YOUR" not in env_cookie:
        return env_cookie
    if COOKIE_FILE.exists():
        text = COOKIE_FILE.read_text(encoding="utf-8", errors="ignore")
        for candidate in text.strip().splitlines():
            candidate = candidate.strip()
            if not candidate or candidate.startswith("#") or "PASTE_YOUR" in candidate:
                continue
            return candidate
    return _cookie_from_cursor_browser()
|
||||
|
||||
|
||||
def get_csrf(cookie: str) -> str:
    """Extract a CSRF token value from a raw Cookie header string.

    Looks for ``bv_csrf_token`` first, then ``minutes_csrf_token``; returns
    "" when neither is present.
    """
    for key in ("bv_csrf_token", "minutes_csrf_token"):
        marker = key + "="
        idx = cookie.find(marker)
        if idx == -1:
            continue
        value_start = idx + len(marker)
        value_end = cookie.find(";", value_start)
        if value_end == -1:
            value_end = len(cookie)
        return cookie[value_start:value_end].strip()
    return ""
|
||||
|
||||
|
||||
def duration_to_seconds(val) -> int:
    """Convert an API ``duration`` value to whole seconds.

    Accepts raw numbers (values >= 10000 are heuristically treated as
    milliseconds), Chinese strings like "2小时47分钟52秒", English strings
    like "2h47m52s", and plain numeric strings.  Returns 0 for None, empty,
    or unparseable input.

    BUG FIX: the previous Chinese pattern used character classes like
    ``[小時时]`` / ``[分分钟]`` which match a SINGLE character, so
    "2小时47分钟52秒" parsed as just 2 seconds, and its bare-digit seconds
    group swallowed any digit, making the English branch unreachable.
    """
    if val is None:
        return 0
    if isinstance(val, (int, float)):
        # Values >= 10000 are assumed to be milliseconds (list API unit).
        v = int(val)
        return v if v < 10000 else v // 1000
    s = str(val).strip()
    if not s:
        return 0

    def _hms(match) -> int:
        # Fold (hours, minutes, seconds) groups — any may be absent.
        h, mi, sec = (int(g or 0) for g in match.groups())
        return h * 3600 + mi * 60 + sec

    # Chinese: "2小时47分钟52秒" / "47分钟52秒" / "52秒" (simplified or traditional).
    m = re.search(r"(?:(\d+)\s*小?[时時])?\s*(?:(\d+)\s*分[钟鐘]?)?\s*(?:(\d+)\s*秒)?", s)
    if m and any(g is not None for g in m.groups()):
        return _hms(m)
    # English: "2h47m52s" / "47m" / "52s".
    m = re.search(r"(?:(\d+)\s*h)?\s*(?:(\d+)\s*m)?\s*(?:(\d+)\s*s)?", s, re.I)
    if m and any(g is not None for g in m.groups()):
        return _hms(m)
    # Plain numeric string, e.g. "10072" (already seconds).
    try:
        return int(float(s))
    except ValueError:
        return 0
|
||||
|
||||
|
||||
def fetch_list(cookie: str, max_pages: int = 50) -> list:
    """Page through the Minutes space-list API and return all raw items.

    Pagination is keyed on the last item's ``create_time`` (sent back as the
    API's ``last_time`` cursor).  Stops on HTTP/code errors, an empty page,
    or a short page (< 50 items).  Prints per-page progress.

    Cleanup: the CSRF token is now computed once (was called twice), and the
    redundant second ``create_time`` fetch was merged with ``last_ts``.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
        "Cookie": cookie,
        "Referer": REFERER,
    }
    csrf = get_csrf(cookie)
    if csrf:
        headers["bv-csrf-token"] = csrf
    all_items = []
    last_ts = ""
    for page in range(1, max_pages + 1):
        url = "https://cunkebao.feishu.cn/minutes/api/space/list?size=50&space_name=1"
        if last_ts:
            url += f"&last_time={last_ts}"
        try:
            r = requests.get(url, headers=headers, timeout=30)
            if r.status_code != 200:
                break
            data = r.json()
            if data.get("code") != 0:
                break
            items = data.get("data", {}).get("list", [])
            if not items:
                break
            all_items.extend(items)
            last_ts = items[-1].get("create_time", "")
            if last_ts:
                try:
                    ts = int(last_ts)
                    # Heuristic: timestamps > 1e12 are milliseconds.
                    dt = datetime.fromtimestamp(ts / 1000) if ts > 1e12 else datetime.fromtimestamp(ts)
                    print(f" 页{page}: +{len(items)} 条, 本页最早: {dt}")
                except Exception:
                    print(f" 页{page}: +{len(items)} 条")
            if len(items) < 50:
                # Short page means we reached the end.
                break
            time.sleep(0.35)  # polite rate limit between pages
        except Exception as e:
            print(f" 页{page} 错误: {e}")
            break
    return all_items
|
||||
|
||||
|
||||
def get_status(cookie: str, object_token: str) -> dict | None:
    """GET the minutes status endpoint; return its ``data`` dict, or None on
    any HTTP/code/parse failure.

    Cleanup: the CSRF token is computed once (was called twice per request).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
        "Cookie": cookie,
        "Referer": REFERER,
    }
    csrf = get_csrf(cookie)
    if csrf:
        headers["bv-csrf-token"] = csrf
    # _t is a cache-busting millisecond timestamp.
    url = f"{STATUS_URL}?object_token={object_token}&language=zh_cn&_t={int(time.time() * 1000)}"
    try:
        r = requests.get(url, headers=headers, timeout=20)
        if r.status_code != 200:
            return None
        data = r.json()
        if data.get("code") != 0:
            return None
        return data.get("data")
    except Exception:
        return None
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry: find the earliest minutes item that is >= 1 hour AND has a video.

    Returns an exit code: 1 on missing deps/cookie/empty list, 0 otherwise
    (including "nothing found").
    """
    # requests was imported with a try/except at module level; None means missing.
    if not requests:
        print("❌ 需要 requests: pip install requests", file=sys.stderr)
        return 1

    # Hand-rolled flag parsing (no argparse).
    list_only = "--list-only" in sys.argv
    max_status = 300  # default number of earliest items to status-check
    for i, arg in enumerate(sys.argv):
        if arg == "--max-status" and i + 1 < len(sys.argv):
            try:
                max_status = int(sys.argv[i + 1])
            except ValueError:
                # Non-numeric value: keep the default.
                pass
            break

    cookie = get_cookie()
    if not cookie or len(cookie) < 100:  # a real session cookie is long
        print("❌ 未配置有效 Cookie(见 SKILL 或 cookie_minutes.txt / Cursor 浏览器)", file=sys.stderr)
        return 1

    # 1) List: prefer the cached full list (has duration; list API unit is ms).
    if FULL_LIST_FILE.exists():
        try:
            raw = json.loads(FULL_LIST_FILE.read_text(encoding="utf-8"))
            items = [x for x in raw if x.get("object_token")]
            print(f"✅ 已加载 {len(items)} 条妙记(来自 {FULL_LIST_FILE})")
        except Exception as e:
            print(f"⚠️ 读取完整列表失败: {e},改为拉取")
            items = []
    elif TOKEN_LIST_FILE.exists():
        # Older token-only cache: no duration, so only "has video" can be filtered.
        try:
            raw = json.loads(TOKEN_LIST_FILE.read_text(encoding="utf-8"))
            items = [x for x in raw if x.get("object_token")]
            print(f"✅ 已加载 {len(items)} 条(来自 {TOKEN_LIST_FILE},无 duration,将仅按「有视频」筛选)")
        except Exception:
            items = []
    else:
        items = []

    # No usable cache: fetch from the API and persist both cache files.
    if not items:
        print("拉取妙记列表…")
        items = fetch_list(cookie, max_pages=100)
        if not items:
            print("❌ 未拉取到任何妙记")
            return 1
        items.sort(key=lambda x: int(x.get("create_time") or 0))
        FULL_LIST_FILE.write_text(json.dumps(items, ensure_ascii=False), encoding="utf-8")
        TOKEN_LIST_FILE.write_text(
            json.dumps(
                [{"object_token": x.get("object_token"), "topic": x.get("topic"), "create_time": x.get("create_time")} for x in items],
                ensure_ascii=False,
            ),
            encoding="utf-8",
        )
        print(f"✅ 共 {len(items)} 条,已保存到 {FULL_LIST_FILE}")

    # Sort ascending by create_time (earliest first) and report the range.
    items.sort(key=lambda x: int(x.get("create_time") or 0))
    if items:
        t0 = int(items[0].get("create_time") or 0)
        t1 = int(items[-1].get("create_time") or 0)
        for ts, label in [(t0, "最早"), (t1, "最新")]:
            # Heuristic: timestamps > 1e12 are milliseconds.
            dt = datetime.fromtimestamp(ts / 1000) if ts > 1e12 else datetime.fromtimestamp(ts)
            print(f" {label}一条: {dt} | {items[0].get('topic', '')[:50] if label == '最早' else items[-1].get('topic', '')[:50]}")

    if list_only:
        return 0

    # 2) Filter on list-level duration (ms) when available; then status-check
    #    the earliest candidates for a downloadable video URL.
    dur_ms_key = "duration"
    has_duration = any(it.get(dur_ms_key) is not None for it in items)
    if has_duration:
        long_items = [it for it in items if (int(it.get(dur_ms_key) or 0)) >= MIN_DURATION_MS]
        print(f"\n列表中含 duration≥1小时 的共 {len(long_items)} 条(按 create_time 从早到晚检查是否有视频)")
    else:
        long_items = items
        print("\n⚠️ 列表无 duration 字段(可能来自旧版 token 列表),按 create_time 从早到晚检查「有视频」;时长请手动核对。")

    long_items.sort(key=lambda x: int(x.get("create_time") or 0))
    to_check = long_items[: max_status]
    print(f"对最早 {len(to_check)} 条请求 status(需含 video_download_url)…")
    candidates = []
    for i, it in enumerate(to_check):
        token = it.get("object_token")
        if not token:
            continue
        create_time = int(it.get("create_time") or 0)
        topic = (it.get("topic") or "")[:60]
        dur_ms = int(it.get(dur_ms_key) or 0)
        dur_sec = dur_ms // 1000 if dur_ms else 0
        data = get_status(cookie, token)
        if not data:
            # Progress heartbeat every 50 checked items.
            if (i + 1) % 50 == 0:
                print(f" 已查 {i + 1}/{len(to_check)} …")
            time.sleep(0.2)  # polite rate limit
            continue
        video_info = data.get("video_info") or {}
        video_url = video_info.get("video_download_url") if isinstance(video_info, dict) else None
        if not video_url or not isinstance(video_url, str):
            time.sleep(0.2)
            continue
        dt_str = ""
        if create_time:
            dt_str = (datetime.fromtimestamp(create_time / 1000) if create_time > 1e12 else datetime.fromtimestamp(create_time)).strftime("%Y-%m-%d %H:%M")
        # De-duplicate by object_token: record each meeting only once.
        if not any(c["object_token"] == token for c in candidates):
            candidates.append({
                "object_token": token,
                "topic": topic,
                "create_time": create_time,
                "date_str": dt_str,
                "duration_sec": dur_sec,
                # URL is truncated for display only.
                "video_url": video_url[:80] + "…" if len(video_url) > 80 else video_url,
            })
            print(f" ✓ 符合: {dt_str} | {dur_sec}s ({dur_sec // 3600}h{(dur_sec % 3600) // 60}m) | {topic[:40]}")
        time.sleep(0.25)

    if not candidates:
        print("\n未找到「最早 + 时长≥1小时 + 有画面」的妙记(在当前列表与检查条数范围内)。")
        return 0

    # Candidates were checked earliest-first, so the first one is the answer.
    first = candidates[0]
    print("\n" + "=" * 60)
    print("【最早的一条】时长>1小时且含视频:")
    print(f" 标题: {first['topic']}")
    print(f" 日期: {first['date_str']}")
    print(f" 时长: {first['duration_sec']} 秒({first['duration_sec'] // 3600} 小时 {(first['duration_sec'] % 3600) // 60} 分钟)")
    print(f" object_token: {first['object_token']}")
    print(f" 链接: https://cunkebao.feishu.cn/minutes/{first['object_token']}")
    print("=" * 60)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell (equivalent to sys.exit).
    raise SystemExit(main())
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"access_token": "u-dHEj_.UY90dV14HLKrj6Dnlh34b1ghghOMGaZNk0220V",
|
||||
"refresh_token": "ur-dphocXbQpdvVav078ZVRqdlh3KFxghMVrgGaYA40225Z",
|
||||
"access_token": "u-fC.wuuRp91Ha5dCmygjeK5lh1CzxghOPiMGaUwk0261U",
|
||||
"refresh_token": "ur-c49nCzxYZcNEgDW6CQEb0Rlh1AHxghqXqwGaYNk0270J",
|
||||
"name": "飞书用户",
|
||||
"auth_time": "2026-03-11T20:37:17.022220"
|
||||
"auth_time": "2026-03-12T12:18:56.479463"
|
||||
}
|
||||
130
02_卡人(水)/水桥_平台对接/飞书管理/脚本/add_xmind_reading_note.py
Normal file
130
02_卡人(水)/水桥_平台对接/飞书管理/脚本/add_xmind_reading_note.py
Normal file
@@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
向 读书笔记.xmind 的「一、个人提升」下添加《人选天选论》节点。
|
||||
格式:中心为书名+卡若读书笔记;连接 总结/描述/人物/问题/金句;金水木火土为独立节点(detached),不连书名,位置对齐。
|
||||
"""
|
||||
import json
|
||||
import zipfile
|
||||
import uuid
|
||||
import os
|
||||
import shutil
|
||||
|
||||
XMIND_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind"
|
||||
BACKUP_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind.bak"
|
||||
|
||||
def gen_id():
    """Return a fresh 22-character hexadecimal topic id (XMind format)."""
    hex32 = uuid.uuid4().hex
    return hex32[:22]
|
||||
|
||||
def make_topic(title, **kw):
    """Build a minimal XMind topic dict (fresh id, class, title), then merge
    the extra keyword args in — keys in ``kw`` may override the defaults.
    """
    topic = {"id": uuid.uuid4().hex[:22], "class": "topic", "title": title}
    topic.update(kw)
    return topic
|
||||
|
||||
def main():
    """Insert a 人选天选论 book node under the 一、个人提升 branch of the XMind file.

    Reads content.json from the .xmind zip, mutates it in memory, backs up
    the original file, then rewrites the archive and swaps it in atomically
    with os.replace.
    """
    with zipfile.ZipFile(XMIND_PATH, "r") as z:
        content = json.loads(z.read("content.json"))
    # content.json is a list of sheets; sheet 0 is the main map.
    sheet = content[0]
    root = sheet["rootTopic"]
    attached = root["children"]["attached"]

    # Locate the "一、个人提升" branch (for/else reports when missing).
    for node in attached:
        if node.get("title") != "一、个人提升":
            continue
        children = node.setdefault("children", {})
        book_attached = children.setdefault("attached", [])

        # Book node: center topic + 5 branches connected to the book title.
        center_id = gen_id()
        topic_summary = make_topic("总结", **{
            "children": {"attached": [
                make_topic("运气来源于选择与人选;人选与混沌因果交替成别人的天选;福祸只在内心贪惧。"),
                make_topic("定义层/因果层/案例层/应用层;人选可优化、天选交概率。"),
            ]}
        })
        topic_desc = make_topic("描述", **{
            "children": {"attached": [
                make_topic("选福选祸定义:贪惧×拿起/放下;人选=我之选,天选=其余;运气=别人选择。"),
                make_topic("捡钱包得福天选祸;退钱给讹钱者=人选祸天选福;面试官三选一;外卖-车祸-复仇链。"),
            ]}
        })
        topic_chars = make_topic("人物", **{
            "children": {"attached": [
                make_topic("讲述者:NT 力量+完美 老虎+猫头鹰"),
                make_topic("面试官:力量+活跃 老虎+孔雀"),
                make_topic("外卖小哥:和平/无尾熊"),
                make_topic("面店老板:孔雀+无尾熊"),
                make_topic("法拉利车主:力量+活跃 老虎+孔雀"),
                make_topic("外卖小哥亲人:力量 老虎"),
            ]}
        })
        topic_questions = make_topic("问题", **{
            "children": {"attached": [
                make_topic("私域里的运气如何用人选天选拆解?"),
                make_topic("云阿米巴分钱与合伙如何对应选福/选祸?"),
                make_topic("一人公司如何用承担代价做差异化?"),
            ]}
        })
        topic_quotes = make_topic("金句", **{
            "children": {"attached": [
                make_topic("1.运气来源于选择,人选与混沌因果交替变成别人的天选"),
                make_topic("2.关照内心恐惧和贪婪,就能看清福祸利险"),
                make_topic("3.你的运气就是别人的选择,别人的运气就是你的选择"),
                make_topic("4.愿意为选择与结果承担一切代价,让世界恐惧你"),
                make_topic("5.福祸只在心中贪惧产生,不因他人评判改变"),
                make_topic("关键字:人选 · 天选 · 运气 · 贪婪与恐惧 · 选福选祸"),
            ]}
        })

        # The 5 main branches (总结/描述/人物/问题/金句) attach to the center.
        book_node = {
            "id": center_id,
            "class": "topic",
            "title": "人选天选论",
            "notes": {"plain": {"content": "卡若读书笔记\n运气来源于选择与人选;福祸只在内心贪惧。"}},
            "branch": "folded",
            "children": {
                "attached": [topic_summary, topic_desc, topic_chars, topic_questions, topic_quotes],
                # 金水木火土: detached (free-floating) nodes, not linked to the
                # book title; positions match the template layout (金 top,
                # 水 upper-right, 土 left, 火 lower-left, 木 lower-right).
                "detached": [
                    make_topic("金", position={"x": 0, "y": -240}),
                    make_topic("水", position={"x": 260, "y": -200}),
                    make_topic("土", position={"x": -260, "y": 0}),
                    make_topic("火", position={"x": -260, "y": 220}),
                    make_topic("木", position={"x": 260, "y": 220}),
                ]
            }
        }
        # Fill the five-element content.  Detached nodes usually render only
        # the title, so the text goes into notes.
        wuxing_content = {
            "金": "选福选祸定义;人选=我/天选=其余;运气=别人选择;福祸不挂钩道德",
            "水": "捡钱包与退钱;面试官三选一;外卖-面店-车祸-复仇链",
            "木": "关照贪惧;选祸得福;承担一切代价",
            "火": "微观归因混沌;世界只剩我与天",
            "土": "选择主体不唯人;我之外即天",
        }
        for d in book_node["children"]["detached"]:
            title = d["title"]
            if title in wuxing_content:
                d["notes"] = {"plain": {"content": wuxing_content[title]}}

        book_attached.append(book_node)
        break
    else:
        # for/else: no branch titled 一、个人提升 was found — abort without writing.
        print("未找到「一、个人提升」")
        return

    # Back up the original, then rewrite the zip (STORED, no compression) and
    # atomically swap it in place.
    shutil.copy(XMIND_PATH, BACKUP_PATH)
    with zipfile.ZipFile(XMIND_PATH, "r") as z_in:
        with zipfile.ZipFile(XMIND_PATH + ".tmp", "w", zipfile.ZIP_STORED) as z_out:
            for name in z_in.namelist():
                if name == "content.json":
                    z_out.writestr(name, json.dumps(content, ensure_ascii=False, indent=0))
                else:
                    z_out.writestr(name, z_in.read(name))
    os.replace(XMIND_PATH + ".tmp", XMIND_PATH)
    print("已添加「人选天选论」到 读书笔记.xmind(一、个人提升下);金水木火土为独立节点。备份:", BACKUP_PATH)
|
||||
|
||||
# Script entry point: modifies the XMind file in place when run directly.
if __name__ == "__main__":
    main()
|
||||
194
02_卡人(水)/水桥_平台对接/飞书管理/脚本/reading_note_send_webhook.py
Normal file
194
02_卡人(水)/水桥_平台对接/飞书管理/脚本/reading_note_send_webhook.py
Normal file
@@ -0,0 +1,194 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
读书笔记完整发布流程:
|
||||
1. 把脑图图片复制到读书笔记目录
|
||||
2. 在 MD 文章里插入/更新脑图图片引用
|
||||
3. 上传图片到飞书(通过本地 feishu_api 获取 image_key)
|
||||
4. 发送富文本消息(文章摘要 + 图片)到飞书群 webhook
|
||||
|
||||
用法:
|
||||
python3 reading_note_send_webhook.py \
|
||||
--md "/path/to/读书笔记.md" \
|
||||
--img "/path/to/脑图.png" \
|
||||
--webhook "https://open.feishu.cn/open-apis/bot/v2/hook/xxx"
|
||||
"""
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import requests
|
||||
from pathlib import Path
|
||||
|
||||
# ─── 本地飞书 API 服务 ───────────────────────────────────────────────
|
||||
LOCAL_API = "http://127.0.0.1:5050"
|
||||
|
||||
|
||||
def get_tenant_token(app_id: str, app_secret: str) -> str | None:
    """Fetch a Feishu tenant_access_token (required by the im/v1/images upload).

    Returns the token string, or None when the request fails or the API
    reports a non-zero code.
    """
    endpoint = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal"
    try:
        resp = requests.post(
            endpoint,
            json={"app_id": app_id, "app_secret": app_secret},
            timeout=15,
        )
        body = resp.json()
        if body.get("code") == 0:
            return body.get("tenant_access_token")
    except Exception as e:
        print(f"⚠️ 获取 tenant token 失败: {e}")
    return None
|
||||
|
||||
|
||||
def upload_image(token: str, img_path: Path) -> str | None:
    """Upload an image to Feishu IM (tenant_access_token auth); return its image_key.

    Returns None when the file is missing or the API rejects the upload.

    Fix: the MIME type was hardcoded to image/png even for .jpg files; it is
    now guessed from the filename, falling back to image/png.
    """
    if not img_path.exists():
        print(f"⚠️ 图片不存在: {img_path}")
        return None
    import mimetypes  # local import: only needed here
    mime = mimetypes.guess_type(img_path.name)[0] or "image/png"
    url = "https://open.feishu.cn/open-apis/im/v1/images"
    headers = {"Authorization": f"Bearer {token}"}
    with open(img_path, "rb") as f:
        resp = requests.post(
            url, headers=headers,
            data={"image_type": "message"},
            files={"image": (img_path.name, f, mime)},
            timeout=60,
        )
    data = resp.json()
    if data.get("code") == 0:
        key = data["data"]["image_key"]
        print(f"✅ 图片已上传,image_key: {key}")
        return key
    print(f"⚠️ 图片上传失败: {data.get('msg')} code={data.get('code')}")
    return None
|
||||
|
||||
|
||||
def read_md_summary(md_path: Path, max_chars=800) -> str:
    """Extract a short summary from a reading-note markdown file.

    Capturing starts at the first line containing 一句话/金句/关键词 and
    collects subsequent non-empty, non-heading lines (stopping once the
    joined text exceeds ``max_chars``; at most 30 lines are returned).
    Falls back to the first ``max_chars`` characters when no marker exists.
    """
    text = md_path.read_text(encoding="utf-8")
    collected = []
    capturing = False
    for raw_line in text.split("\n"):
        if any(marker in raw_line for marker in ("一句话", "金句", "关键词")):
            capturing = True
        if not capturing:
            continue
        stripped = raw_line.strip()
        if stripped and not stripped.startswith("#"):
            collected.append(stripped)
        if len("\n".join(collected)) > max_chars:
            break
    if collected:
        return "\n".join(collected[:30])
    return text[:max_chars]
|
||||
|
||||
|
||||
def build_post_message(title: str, summary: str, image_key: str | None) -> dict:
|
||||
"""构建飞书富文本 post 消息"""
|
||||
content_rows = []
|
||||
# 添加文字摘要
|
||||
for line in summary.split("\n"):
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
content_rows.append([{"tag": "text", "text": line}])
|
||||
# 加分隔
|
||||
content_rows.append([{"tag": "text", "text": "──────────────"}])
|
||||
# 图片行
|
||||
if image_key:
|
||||
content_rows.append([{"tag": "img", "image_key": image_key}])
|
||||
# 尾部标注
|
||||
content_rows.append([{"tag": "text", "text": "📖 卡若读书笔记 · 五行拆书法"}])
|
||||
return {
|
||||
"msg_type": "post",
|
||||
"content": {
|
||||
"post": {
|
||||
"zh_cn": {
|
||||
"title": f"📚 {title}",
|
||||
"content": content_rows,
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def send_webhook(webhook: str, payload: dict) -> bool:
    """POST a message payload to a Feishu group webhook; return True on success.

    Feishu bots report success as either ``code == 0`` or ``StatusCode == 0``
    depending on API version, so both are accepted.
    """
    response = requests.post(webhook, json=payload, timeout=15)
    result = response.json()
    if result.get("code") == 0 or result.get("StatusCode") == 0:
        print(f"✅ 飞书群消息发送成功")
        return True
    print(f"⚠️ 发送失败: {result}")
    return False
|
||||
|
||||
|
||||
def update_md_with_image(md_path: Path, img_filename: str) -> None:
    """Insert a mind-map image reference near the top of the MD file.

    Skips the write when the reference is already present.

    BUG FIX: ``img_ref`` was the empty string (the markdown image literal was
    lost), so ``img_ref in text`` was always True and the function never
    inserted anything.  Reconstructed as a standard markdown image tag —
    NOTE(review): alt text "思维导图" is a best guess; confirm intended form.
    """
    text = md_path.read_text(encoding="utf-8")
    img_ref = f"![思维导图]({img_filename})"
    if img_ref in text:
        print(f"✅ MD 中已有图片引用 {img_filename},无需更新")
        return
    # Insert an image section before the one-line-summary heading.
    insert_block = f"\n## 脑图\n\n{img_ref}\n\n"
    if "## 一、一句话" in text:
        text = text.replace("## 一、一句话", insert_block + "## 一、一句话", 1)
    elif "## 思维导图" not in text:
        # Otherwise insert before the first "## " heading after the divider.
        text = re.sub(r"(^---\n\n## )", insert_block + r"\1", text, count=1, flags=re.MULTILINE)
    md_path.write_text(text, encoding="utf-8")
    print(f"✅ MD 已更新图片引用: {img_filename}")
|
||||
|
||||
|
||||
def main():
    """Full publish pipeline: copy image → update MD → upload image → send webhook.

    All failures are reported via prints; the function returns early on
    missing input files rather than raising.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--md", required=True, help="读书笔记 MD 路径")
    ap.add_argument("--img", required=True, help="脑图图片路径")
    ap.add_argument("--webhook", required=True, help="飞书群 webhook URL")
    ap.add_argument("--title", default="", help="书名(默认从 MD 文件名提取)")
    args = ap.parse_args()

    md_path = Path(args.md).expanduser().resolve()
    img_path = Path(args.img).expanduser().resolve()
    webhook = args.webhook

    if not md_path.exists():
        print(f"❌ MD 不存在: {md_path}")
        return
    if not img_path.exists():
        print(f"❌ 图片不存在: {img_path}")
        return

    # 1. Copy the mind-map image next to the MD file (skip if already there).
    dest_img = md_path.parent / img_path.name
    if dest_img != img_path:
        shutil.copy(img_path, dest_img)
        print(f"✅ 图片已复制到: {dest_img}")

    # 2. Insert/refresh the image reference inside the MD.
    update_md_with_image(md_path, img_path.name)

    # 3. Derive title/summary, get a tenant token, and upload the image.
    title = args.title or md_path.stem.replace("_读书笔记", "").replace("_", " ")
    summary = read_md_summary(md_path)

    # NOTE(review): hardcoded app credentials committed to the repo —
    # rotate this secret and load it from an env var / secret store instead.
    APP_ID = "cli_a48818290ef8100d"
    APP_SECRET = "dhjU0qWd5AzicGWTf4cTqhCWJOrnuCk4"
    token = get_tenant_token(APP_ID, APP_SECRET)
    image_key = None
    if token:
        image_key = upload_image(token, img_path)
    else:
        # Degrade gracefully: send text-only when the image can't be uploaded.
        print("⚠️ 获取 tenant token 失败,将只发文字消息")

    # 4. Build the rich-text message and post it to the group webhook.
    payload = build_post_message(title, summary, image_key)
    send_webhook(webhook, payload)

    print(f"\n✅ 全部完成!")
    print(f" MD: {md_path}")
    print(f" 图片: {dest_img}")
    print(f" 飞书群: {webhook}")
|
||||
|
||||
|
||||
# Script entry point: runs the publish pipeline when executed directly.
if __name__ == "__main__":
    main()
|
||||
199
02_卡人(水)/水桥_平台对接/飞书管理/脚本/xmind_人选天选论_修复.py
Normal file
199
02_卡人(水)/水桥_平台对接/飞书管理/脚本/xmind_人选天选论_修复.py
Normal file
@@ -0,0 +1,199 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
修复 人选天选论 sheet(index 2):
|
||||
1. 将 sheet.title 改为「人选天选论」(使 tab 显示书名而非「读书笔记模板」)
|
||||
2. 五行 detached 节点添加可见子主题(符号+精简内容)
|
||||
3. 人物节点结构:人名→各角色→MBTI/DISC/PDP 正确分层
|
||||
4. 总结/描述/金句/问题 精简符号化
|
||||
"""
|
||||
import json, zipfile, uuid, os, shutil
|
||||
|
||||
XMIND_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind"
|
||||
BACKUP_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind.bak2"
|
||||
|
||||
def gid():
    """Return a random 22-character hex topic id."""
    return uuid.uuid4().hex[:22]
|
||||
|
||||
def tp(title, children=None, notes=None, position=None, markers=None, labels=None, branch=None):
    """Build an XMind topic dict; optional fields are added only when truthy."""
    topic = {"id": uuid.uuid4().hex[:22], "class": "topic", "title": title}
    if children:
        topic["children"] = {"attached": children}
    if notes:
        topic["notes"] = {"plain": {"content": notes}}
    if position:
        topic["position"] = position
    if markers:
        topic["markers"] = markers
    if labels:
        topic["labels"] = labels
    if branch:
        topic["branch"] = branch
    return topic
|
||||
|
||||
def main():
    """Repair the 人选天选论 sheet: rename the tab, fill the five-element
    detached nodes with visible children, and rebuild the five attached
    branches (总结/描述/人物/问题/金句) while keeping their positions.

    Backs up the .xmind file, then rewrites the zip and swaps it in with
    os.replace.
    """
    with zipfile.ZipFile(XMIND_PATH, "r") as z:
        raw = z.read("content.json")
    content = json.loads(raw)

    # ---- Locate the 人选天选论 sheet by its root topic title ----
    sheet = None
    for s in content:
        if s.get("rootTopic", {}).get("title") == "人选天选论":
            sheet = s
            break
    if not sheet:
        print("❌ 未找到 人选天选论 sheet")
        return

    # 1. Fix the tab title (was showing the template name).
    sheet["title"] = "人选天选论"

    rt = sheet["rootTopic"]
    # Label shown on the center topic.
    rt["labels"] = ["卡若读书笔记"]

    # ---- 2. Five-element detached nodes: add visible sub-topics ----
    wuxing_children = {
        "金": [
            tp("📌 定位·目标"),
            tp("⚡ 选福:贪中拿/放 或 惧中放"),
            tp("⚠️ 选祸:惧中拿起"),
            tp("🔑 人选 = 我的贪惧之选"),
            tp("🌐 天选 = 我之外一切之选"),
            tp("🎲 运气 = 别人的人选"),
        ],
        "水": [
            tp("📌 过程·事件"),
            tp("🔁 捡钱包选福 → 天选祸"),
            tp("🔁 退让讹钱选祸 → 天选福"),
            tp("🔁 面试官三选一 → 你成别人天选"),
            tp("🔁 外卖 → 面店 → 车祸 → 复仇"),
            tp("🔁 鹅吃草=福,鸭吃草=祸(本性不同)"),
        ],
        "木": [
            tp("📌 落地·心法"),
            tp("✅ 觉察内心贪惧 → 看清福祸"),
            tp("✅ 选祸 = 放贪 / 直面惧 → 天选福"),
            tp("✅ 承担一切代价 → 持久敬畏"),
        ],
        "火": [
            tp("📌 模型·升级"),
            tp("💡 微观不可测 → 归因「混沌」"),
            tp("💡 我 = 贪惧之选 / 天 = 其余一切"),
            tp("💡 薛定谔:观测即改变"),
        ],
        "土": [
            tp("📌 系统·放大"),
            tp("🌐 选择主体不唯人,万物皆有倾向"),
            tp("🌐 我之外即天"),
            tp("🌐 人选 × 天选叠加 = 运气机制"),
        ],
    }
    detached = rt.get("children", {}).get("detached", [])
    for d in detached:
        name = d.get("title")
        if name in wuxing_children:
            d["children"] = {"attached": wuxing_children[name]}
            # The content now lives in child topics; drop the old notes.
            if "notes" in d:
                del d["notes"]

    # ---- 3. Rebuild attached branches, preserving their old positions ----
    old_positions = {}
    for a in rt.get("children", {}).get("attached", []):
        old_positions[a.get("title")] = a.get("position")

    # Position lookup by branch title; None when the branch was new.
    def pos(name): return old_positions.get(name)

    # Summary branch
    p_总结 = tp("总结", position=pos("总结"), children=[
        tp("💎 一句话", children=[
            tp("运气 = 选择叠加;你的人选与混沌交替,成别人天选"),
        ]),
        tp("🗂️ 四层结构", children=[
            tp("① 定义层:贪惧 × 拿放 → 选福/选祸"),
            tp("② 因果层:人选→天选→运气"),
            tp("③ 案例层:面试/外卖/鹅鸭"),
            tp("④ 应用层:觉察贪惧·承担代价"),
        ]),
        tp("🎯 对卡若的价值", children=[
            tp("把运气变成可设计的人选工程"),
            tp("选祸得福 = 云阿米巴让利逻辑"),
        ]),
    ])

    # Description branch
    p_描述 = tp("描述", position=pos("描述"), children=[
        tp("🔑 核心定义", children=[
            tp("贪中拿起 = 选福"),
            tp("惧中放下 = 选福"),
            tp("惧中拿起 = 选祸"),
        ]),
        tp("🔄 运作机制", children=[
            tp("人选 → 交混沌 → 成别人天选 → 形成运气"),
        ]),
        tp("⚖️ 无对错,只在各自内心贪惧"),
    ])

    # Characters branch (template layout: name → role → MBTI/DISC/PDP layers)
    roles = [
        tp("讲述者(作者)", branch="folded", children=[
            tp("MBTI: INTP/ENTP"),
            tp("DISC: 力量+完美"),
            tp("PDP: 老虎+猫头鹰"),
            tp("逻辑建模,定义严谨"),
        ]),
        tp("面试官", branch="folded", children=[
            tp("MBTI: ESTJ"),
            tp("DISC: 力量+活跃"),
            tp("PDP: 老虎+孔雀"),
            tp("利益驱动,关系与面子"),
        ]),
        tp("外卖小哥", branch="folded", children=[
            tp("DISC: 和平"),
            tp("PDP: 无尾熊"),
            tp("随境而动,未主动改路径"),
        ]),
        tp("法拉利车主", branch="folded", children=[
            tp("MBTI: ESTP"),
            tp("DISC: 力量+活跃"),
            tp("PDP: 老虎+孔雀"),
            tp("即时满足,后果后置"),
        ]),
        tp("外卖小哥亲人", branch="folded", children=[
            tp("DISC: 力量"),
            tp("PDP: 老虎"),
            tp("为情感承担一切代价"),
        ]),
    ]
    p_人物 = tp("人物", position=pos("人物"), children=[
        tp("人名", branch="folded", children=roles),
    ])

    # Questions branch
    p_问题 = tp("问题", position=pos("问题"), children=[
        tp("🎯 私域运气 = 用户人选;如何设计我的人选让对方「天选福」?"),
        tp("💰 云阿米巴让利 = 选祸→天选福;如何持续验证路径?"),
        tp("🏆 承担代价 → 差异化信任;如何在私域可感知?"),
    ])

    # Quotes branch
    p_金句 = tp("金句", position=pos("金句"), children=[
        tp("1. 运气 = 人选与混沌交替变成别人的天选"),
        tp("2. 关照内心贪惧 = 看清福祸利险"),
        tp("3. 你的运气 = 别人的选择;别人的运气 = 你的选择"),
        tp("4. 愿意承担一切代价的人,让世界恐惧你"),
        tp("5. 福祸只在贪惧产生,不因他人评判改变"),
        tp("🔑 关键字", markers=[{"markerId": "c_symbol_pen"}], children=[
            tp("人选 · 天选 · 运气 · 贪惧 · 选福选祸"),
        ]),
    ])

    # Replace the attached branches wholesale (detached nodes untouched).
    rt["children"]["attached"] = [p_总结, p_描述, p_人物, p_问题, p_金句]

    # ---- Write back: backup, rewrite the zip, atomic swap ----
    shutil.copy(XMIND_PATH, BACKUP_PATH)
    with zipfile.ZipFile(XMIND_PATH, "r") as z_in:
        with zipfile.ZipFile(XMIND_PATH + ".tmp", "w", zipfile.ZIP_STORED) as z_out:
            for name in z_in.namelist():
                if name == "content.json":
                    z_out.writestr(name, json.dumps(content, ensure_ascii=False, indent=0))
                else:
                    z_out.writestr(name, z_in.read(name))
    os.replace(XMIND_PATH + ".tmp", XMIND_PATH)
    print("✅ 完成:tab 名已改为「人选天选论」;五行/总结/描述/人物/问题/金句 已填充;备份:", BACKUP_PATH)
|
||||
|
||||
# Script entry point: repairs the XMind sheet in place when run directly.
if __name__ == "__main__":
    main()
|
||||
159
02_卡人(水)/水桥_平台对接/飞书管理/脚本/xmind_人选天选论_模板表.py
Normal file
159
02_卡人(水)/水桥_平台对接/飞书管理/脚本/xmind_人选天选论_模板表.py
Normal file
@@ -0,0 +1,159 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
1. 从「读书笔记」表(sheet0)的「一、个人提升」下移除「人选天选论」节点。
|
||||
2. 在「读书笔记模板」表(sheet1《书名》)同格式下,新增独立 sheet「人选天选论」:
|
||||
- 与模板一致:中心书名 + 总结/描述/人物/问题/金句(与书名连接),金水木火土为独立节点(detached)、位置与模板一致。
|
||||
3. 下方标签(各 sheet)对应主题、格式一致。
|
||||
"""
|
||||
import json
|
||||
import zipfile
|
||||
import uuid
|
||||
import os
|
||||
import shutil
|
||||
import copy
|
||||
|
||||
XMIND_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind"
|
||||
BACKUP_PATH = "/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind.bak"
|
||||
|
||||
def gen_id():
    """Generate a fresh 22-character hexadecimal topic id."""
    hex32 = uuid.uuid4().hex
    return hex32[:22]
|
||||
|
||||
def deep_copy_with_new_ids(obj, id_map=None):
    """Deep-copy a JSON-like structure, giving every "id" value a fresh id.

    ``id_map`` remembers old-id → new-id so the same original id is rewritten
    consistently everywhere (keeps cross-references aligned).  Non-dict/list
    values are returned as-is; the input is never mutated.

    BUG FIX: the old code used ``id_map.get(v, gen_id())`` without ever
    storing the mapping, so duplicate ids received *different* new ids —
    inconsistent with the collect/replace pass used in main().  Now uses
    setdefault.
    """
    if id_map is None:
        id_map = {}
    if isinstance(obj, dict):
        out = {}
        for k, v in obj.items():
            if k == "id" and isinstance(v, str):
                # Reuse the mapped id when this original id was seen before.
                out[k] = id_map.setdefault(v, uuid.uuid4().hex[:22])
            else:
                out[k] = deep_copy_with_new_ids(v, id_map)
        return out
    if isinstance(obj, list):
        return [deep_copy_with_new_ids(x, id_map) for x in obj]
    return obj
|
||||
|
||||
def main():
    """Rebuild 读书笔记.xmind in place.

    Steps:
      1. Remove the "人选天选论" child node from "一、个人提升" on sheet 0.
      2. Deep-copy the template sheet (index 1, the 《书名》 reading-note
         template), regenerate every node id, retitle it "人选天选论" and
         fill in the note's summary / description / people / questions /
         quotes content, keeping the template's structure.
      3. Annotate the five detached 五行 (金水木火土) nodes.
      4. Insert the new sheet at index 2, back up the archive, then rewrite
         the .xmind ZIP with the updated content.json.

    No parameters; paths come from XMIND_PATH / BACKUP_PATH. Raises the
    usual OSError / zipfile / json errors if the archive is missing or
    malformed.
    """
    with zipfile.ZipFile(XMIND_PATH, "r") as z:
        content = json.loads(z.read("content.json"))

    # 1) Remove "人选天选论" from under "一、个人提升" on sheet 0.
    sheet0 = content[0]
    for node in sheet0.get("rootTopic", {}).get("children", {}).get("attached", []):
        if node.get("title") != "一、个人提升":
            continue
        attached = node.get("children", {}).get("attached", [])
        node["children"]["attached"] = [x for x in attached if x.get("title") != "人选天选论"]
        break

    # 2) Copy the template sheet (sheet 1: 《书名》 = reading-note template);
    #    first pass reserves a fresh id for every existing id, second pass
    #    rewrites them, so duplicated ids stay consistent across the copy.
    template = copy.deepcopy(content[1])
    id_map = {}

    def collect_ids(obj):
        # Walk the JSON structure and reserve a new id for each "id" string.
        if isinstance(obj, dict):
            if "id" in obj and isinstance(obj["id"], str):
                id_map.setdefault(obj["id"], gen_id())
            for v in obj.values():
                collect_ids(v)
        elif isinstance(obj, list):
            for x in obj:
                collect_ids(x)
    collect_ids(template)

    def replace_ids(obj):
        # Rewrite every "id" in place using the reserved mapping; any id not
        # seen in the first pass (shouldn't happen) still gets a fresh one.
        if isinstance(obj, dict):
            if "id" in obj and isinstance(obj["id"], str):
                obj["id"] = id_map.get(obj["id"], gen_id())
            for v in obj.values():
                replace_ids(v)
        elif isinstance(obj, list):
            for x in obj:
                replace_ids(x)
    replace_ids(template)
    new_sheet = template
    rt = new_sheet["rootTopic"]

    # Retitle the root topic to "人选天选论" and fill its note/labels.
    rt["title"] = "人选天选论"
    rt["notes"] = {"plain": {"content": "卡若读书笔记\n运气来源于选择与人选;人选与混沌因果交替成别人的天选;福祸只在内心贪惧。"}}
    rt["labels"] = ["卡若读书笔记"]
    if "href" in rt:
        # Drop any hyperlink the template root carried; it belongs to the
        # template, not to the new sheet.
        del rt["href"]

    # Fill the content under 总结/描述/人物/问题/金句, preserving the
    # template structure (only titles / notes / child nodes are changed).
    attached = rt.get("children", {}).get("attached", [])
    for node in attached:
        title = node.get("title")
        if title == "总结":
            # NOTE: the setdefault chain only guarantees the keys exist;
            # the next line then replaces "attached" wholesale.
            node.setdefault("children", {}).setdefault("attached", [])
            node["children"]["attached"] = [
                {"id": gen_id(), "class": "topic", "title": "运气来源于选择与人选;人选与混沌因果交替成别人的天选;福祸只在内心贪惧。"},
                {"id": gen_id(), "class": "topic", "title": "定义层/因果层/案例层/应用层;人选可优化、天选交概率。"},
            ]
        elif title == "描述":
            node["children"] = {"attached": [
                {"id": gen_id(), "class": "topic", "title": "选福选祸定义(贪惧×拿起/放下);人选=我之选,天选=其余;运气=别人选择。"},
                {"id": gen_id(), "class": "topic", "title": "捡钱包得福天选祸;退钱给讹钱者=人选祸天选福;面试官三选一;外卖-车祸-复仇链。"},
            ]}
        elif title == "人物":
            # Same layout as the template: 人名 -> MBTI/PDP/DISC/九型人格/评价
            # plus the roles specific to this book.
            inner = node.get("children", {}).get("attached", [])
            if inner and inner[0].get("title") == "人名":
                inner[0]["children"] = {"attached": [
                    {"id": gen_id(), "class": "topic", "title": "讲述者:NT 力量+完美 老虎+猫头鹰"},
                    {"id": gen_id(), "class": "topic", "title": "面试官:力量+活跃 老虎+孔雀"},
                    {"id": gen_id(), "class": "topic", "title": "外卖小哥/面店老板/法拉利车主/外卖小哥亲人"},
                    {"id": gen_id(), "class": "topic", "title": "MBTI"},
                    {"id": gen_id(), "class": "topic", "title": "PDP"},
                    {"id": gen_id(), "class": "topic", "title": "DISC"},
                    {"id": gen_id(), "class": "topic", "title": "九型人格"},
                    {"id": gen_id(), "class": "topic", "title": "评价"},
                ]}
        elif title == "问题":
            node["children"] = {"attached": [
                {"id": gen_id(), "class": "topic", "title": "私域里的运气如何用人选天选拆解?"},
                {"id": gen_id(), "class": "topic", "title": "云阿米巴分钱与合伙如何对应选福/选祸?"},
                {"id": gen_id(), "class": "topic", "title": "一人公司如何用承担代价做差异化?"},
            ]}
        elif title == "金句":
            node["children"] = {"attached": [
                {"id": gen_id(), "class": "topic", "title": "1.运气来源于选择,人选与混沌因果交替变成别人的天选"},
                {"id": gen_id(), "class": "topic", "title": "2.关照内心恐惧和贪婪,就能看清福祸利险"},
                {"id": gen_id(), "class": "topic", "title": "3.你的运气就是别人的选择,别人的运气就是你的选择"},
                {"id": gen_id(), "class": "topic", "title": "4.愿意为选择与结果承担一切代价,让世界恐惧你"},
                {"id": gen_id(), "class": "topic", "title": "5.福祸只在心中贪惧产生,不因他人评判改变"},
                {"id": gen_id(), "class": "topic", "title": "关键字", "markers": [{"markerId": "c_symbol_pen"}],
                 "children": {"attached": [{"id": gen_id(), "class": "topic", "title": "人选 · 天选 · 运气 · 贪婪与恐惧 · 选福选祸"}]}},
            ]}

    # Five-element (五行) detached nodes: same canvas positions as the
    # template; only their notes are updated here.
    wuxing_notes = {
        "金": "选福选祸定义;人选=我/天选=其余;运气=别人选择;福祸不挂钩道德",
        "水": "捡钱包与退钱;面试官三选一;外卖-面店-车祸-复仇链",
        "木": "关照贪惧;选祸得福;承担一切代价",
        "火": "微观归因混沌;世界只剩我与天",
        "土": "选择主体不唯人;我之外即天",
    }
    detached = rt.get("children", {}).get("detached", [])
    # Template positions: 金(-4,-207), 水(240,-50), 木(141,198), 土(-241,-27), 火(-141,197)
    for d in detached:
        t = d.get("title")
        if t in wuxing_notes:
            d["notes"] = {"plain": {"content": wuxing_notes[t]}}

    # Insert the new sheet right after the template (index 2), so the tab
    # order below the canvas matches.
    content.insert(2, new_sheet)

    # Back up, then rewrite the ZIP: copy every member verbatim except
    # content.json, which gets the updated document; finally swap the temp
    # file into place atomically.
    shutil.copy(XMIND_PATH, BACKUP_PATH)
    with zipfile.ZipFile(XMIND_PATH, "r") as z_in:
        with zipfile.ZipFile(XMIND_PATH + ".tmp", "w", zipfile.ZIP_STORED) as z_out:
            for name in z_in.namelist():
                if name == "content.json":
                    z_out.writestr(name, json.dumps(content, ensure_ascii=False, indent=0))
                else:
                    z_out.writestr(name, z_in.read(name))
    os.replace(XMIND_PATH + ".tmp", XMIND_PATH)
    print("已处理:1) 从「一、个人提升」移除人选天选论;2) 新增独立表「人选天选论」(与读书笔记模板格式、五行位置一致)。备份:", BACKUP_PATH)


if __name__ == "__main__":
    main()
|
||||
@@ -28,6 +28,10 @@ updated: "2026-03-11"
|
||||
| **快手** | Playwright headless 自动化 | UI 定时 | ~7-30 天 | Cookie 过期 |
|
||||
| **抖音** | 纯 API(VOD + bd-ticket-guard) | API `timing_ts` | ~2-4h | 账号封禁中 |
|
||||
|
||||
> **关于视频号官方 API 边界**:
|
||||
> 按《视频号与腾讯相关 API 整理》结论,微信官方目前**没有开放「短视频上传/发布」接口**;本 Skill 中的视频号发布能力,属于对 `https://channels.weixin.qq.com` 视频号助手网页协议的逆向封装(DFS 上传 + `post_create`),仅在你本机使用,需自行承担协议变更与合规风险。
|
||||
> 官方可控能力(直播记录、橱窗、留资、罗盘数据、本地生活等)的服务端 API 入口为:`https://developers.weixin.qq.com/doc/channels/api/`,如需做直播/橱窗/留资集成,可基于该文档在单独 Skill 中扩展。
|
||||
|
||||
---
|
||||
|
||||
## 二、一键命令
|
||||
|
||||
@@ -120,3 +120,28 @@ python3 channels_api_publish.py
|
||||
| `脚本/channels_login.py` | Playwright 微信扫码登录 |
|
||||
| `脚本/channels_storage_state.json` | Cookie + localStorage 存储 |
|
||||
| `脚本/channels_task_id.txt` | videoClipTaskId 存储 |
|
||||
|
||||
---
|
||||
|
||||
## 六、腾讯官方 API 能力与授权边界(吸收自《视频号与腾讯相关 API 整理》)
|
||||
|
||||
- **官方文档总入口**:`https://developers.weixin.qq.com/doc/channels/api/`
|
||||
- **通用基础接口**:`/cgi-bin/token`、`/cgi-bin/stable_token` 获取 `access_token`,`/cgi-bin/openapi/quota/*` 管理调用额度。
|
||||
- **视频号助手服务端能力**(官方、需 AppID / AppSecret 授权)主要覆盖:
|
||||
- 直播记录与预约:`/channels/ec/finderlive/*`
|
||||
- 橱窗商品管理:`/channels/ec/window/product/*`
|
||||
- 留资组件与数据:`/channels/leads/*`
|
||||
- 罗盘达人数据、本地生活团购等:详见官方文档模块列表。
|
||||
- **小程序联动**:`wx.getChannelsLiveInfo`、`wx.openChannelsLive`、`channel-live` 组件,用于获取/打开视频号直播,与短视频发布无关。
|
||||
|
||||
**重要边界结论:**
|
||||
|
||||
- 微信官方当前 **没有提供**「通过开放平台 API 直接上传 / 发布视频号短视频」的能力。
|
||||
- 短视频官方发布方式只有:
|
||||
- `https://channels.weixin.qq.com` 视频号助手网页端;
|
||||
- 微信客户端内手动发布。
|
||||
|
||||
**本 Skill 的定位:**
|
||||
|
||||
- 短视频发布:基于「视频号助手网页」的 **逆向协议**(`helper_upload_params` + DFS 分片上传 + `post_create`),封装在 `channels_api_publish.py` 中,供你在本机一键调用;协议细节与风险说明见同目录脚本内注释与《视频号与腾讯相关 API 整理》。
|
||||
- 官方 API:若未来微信开放短视频上传/发布接口,可在本 Skill 中新增「官方 API 模式」,与当前逆向模式并存;直播/橱窗/留资等场景建议在单独的官方 API Skill 中按业务拆分(如「视频号直播数据看板」「视频号橱窗管理」「视频号留资同步」等)。
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
---
|
||||
name: 读书笔记
|
||||
description: 五行结构化读书笔记。触发词:拆解这本书、写入读书笔记、读书笔记、五行拆书、XMind笔记、卡读、金水木火土。使用金水木火土五行框架拆解书籍,自动写入XMind;本地默认存 2、我写的日记/读书笔记,写完后可同步发飞书知识库对应子目录。
|
||||
description: 五行结构化读书笔记。触发词:拆解这本书、写入读书笔记、读书笔记、五行拆书、XMind笔记、卡读、金水木火土。使用金水木火土五行框架拆解书籍,自动写入XMind(无则安装);导出脑图图片插入MD;发飞书群 webhook + 飞书知识库。任何模型可用。
|
||||
group: 火
|
||||
triggers: 拆解这本书、五行拆书、读书笔记、读书笔记发飞书
|
||||
triggers: 拆解这本书、五行拆书、读书笔记、读书笔记发飞书、读书笔记发群
|
||||
owner: 火炬
|
||||
version: "1.1"
|
||||
updated: "2026-03-03"
|
||||
version: "1.2"
|
||||
updated: "2026-03-12"
|
||||
---
|
||||
|
||||
# 读书笔记
|
||||
@@ -39,39 +39,92 @@ updated: "2026-03-03"
|
||||
|
||||
`/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind`
|
||||
|
||||
## 完整工作流程
|
||||
## 完整工作流程(6 步,任何模型可执行)
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────┐
|
||||
│ 读书笔记完整流程 │
|
||||
│ 读书笔记完整流程 v1.2(6步自动化) │
|
||||
├─────────────────────────────────────────────────────────────────────────┤
|
||||
│ │
|
||||
│ 1. 用户提供书籍内容 │
|
||||
│ 书籍文件 / 章节内容 / 核心摘要 │
|
||||
│ 步骤1: 用户提供书籍内容 │
|
||||
│ 书籍文件 / 章节内容 / 核心摘要 / 对话讲稿 │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ 2. AI 使用卡读提示词拆解 │
|
||||
│ references/卡读提示词.md │
|
||||
│ 步骤2: AI 按卡读格式五行拆解 → 写 MD 文件 │
|
||||
│ 格式:金水木火土 + 总结 + 人物(MBTI/DISC/PDP) + 问题 + 金句 │
|
||||
│ 存放:/Users/karuo/Documents/个人/2、我写的日记/读书笔记/ │
|
||||
│ 文件名:书名_读书笔记.md │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ 3. 输出五行结构笔记 │
|
||||
│ 一句话总结 + 金水木火土 + 问答 + 人物 + 金句 │
|
||||
│ 步骤3: 检查并安装 XMind → 写入脑图 │
|
||||
│ 先检查:ls /Applications/ | grep -i xmind │
|
||||
│ 未安装:brew install --cask xmind 或 open https://xmind.app │
|
||||
│ 写入脚本:xmind_人选天选论_模板表.py(复用该模板格式) │
|
||||
│ 规则:书名 sheet(tab=书名)+ 总结/描述/人物/问题/金句连书名 │
|
||||
│ 金水木火土为独立节点(detached),位置对齐模板 │
|
||||
│ XMind 路径:/Users/karuo/Documents/我的脑图/5 学习/读书笔记.xmind │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ 4. 确定分类 │
|
||||
│ 个人提升 / 人际关系 / 创业 / 商业思维 / 投资 │
|
||||
│ 步骤4: 导出脑图图片 → 插入 MD 文章 │
|
||||
│ ① 用 Python 生成脑图图像(GenerateImage 工具) │
|
||||
│ 存放:卡若Ai的文件夹/图片/书名_读书笔记_思维导图.png │
|
||||
│ 登记:卡若Ai的文件夹/图片/图片索引.md │
|
||||
│ ② 同时复制图片到 MD 同目录,在 MD 顶部插入图片引用 │
|
||||
│  │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ 5. 写入 XMind │
|
||||
│ scripts/write_to_xmind.py 自动创建标签页和链接 │
|
||||
│ 步骤5: 发送到飞书群(webhook) │
|
||||
│ 脚本:reading_note_send_webhook.py │
|
||||
│ 发送:文章摘要(富文本)+ 脑图图片 │
|
||||
│ 默认 webhook: │
|
||||
│ https://open.feishu.cn/open-apis/bot/v2/hook/ │
|
||||
│ 8b7f996e-2892-4075-989f-aa5593ea4fbc │
|
||||
│ 命令: │
|
||||
│ python3 reading_note_send_webhook.py \ │
|
||||
│ --md "/path/读书笔记.md" \ │
|
||||
│ --img "/path/脑图.png" \ │
|
||||
│ --webhook "https://..." \ │
|
||||
│ --title "书名" │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ 6. 打开 XMind 确认 │
|
||||
│ open "读书笔记.xmind" │
|
||||
│ 步骤6: 同步飞书知识库(wiki) │
|
||||
│ 父节点:QPyPwwUmtiweUOk6aTmcZLBxnIg(读书笔记目录) │
|
||||
│ 脚本:bash 读书笔记_上传到飞书.sh(已有) │
|
||||
│ 或:python3 feishu_article_unified_publish.py \ │
|
||||
│ --parent QPyPwwUmtiweUOk6aTmcZLBxnIg \ │
|
||||
│ --title "卡若读书笔记:书名" \ │
|
||||
│ --md "/path/读书笔记.md" \ │
|
||||
│ --json "/tmp/书名.json" │
|
||||
│ │
|
||||
└─────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### XMind 未安装时的处理
|
||||
|
||||
```bash
|
||||
# 检查
|
||||
ls /Applications/ | grep -i xmind
|
||||
|
||||
# 未安装:brew 安装(需要 Homebrew)
|
||||
brew install --cask xmind
|
||||
|
||||
# 或手动下载
|
||||
open https://xmind.app/download/
|
||||
|
||||
# 安装完毕后验证
|
||||
ls /Applications/Xmind.app
|
||||
```
|
||||
|
||||
### 飞书群 webhook 配置
|
||||
|
||||
| 配置项 | 值 |
|
||||
|:---|:---|
|
||||
| **默认群 webhook** | `https://open.feishu.cn/open-apis/bot/v2/hook/8b7f996e-2892-4075-989f-aa5593ea4fbc` |
|
||||
| **图片上传用 token** | tenant_access_token(用 APP_ID + APP_SECRET 获取,不需要用户授权) |
|
||||
| **脚本路径** | `02_卡人(水)/水桥_平台对接/飞书管理/脚本/reading_note_send_webhook.py` |
|
||||
|
||||
> ⚠️ 上传图片必须用 `tenant_access_token`,不能用 `user_access_token`(会报 99991668)。
|
||||
|
||||
## 主导图分类
|
||||
|
||||
| 序号 | 分类 | 适用书籍类型 |
|
||||
|
||||
135
运营中枢/参考资料/OpenClaw记忆功能学习与卡若AI对照.md
Normal file
135
运营中枢/参考资料/OpenClaw记忆功能学习与卡若AI对照.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# OpenClaw 记忆功能学习与卡若AI 对照
|
||||
|
||||
> 学习对象:OpenClaw 官方 Memory 文档 + codesfly/openclaw-memory-final(生产级);对照卡若AI 记忆系统,提炼可借鉴点。
|
||||
> 更新:2026-03-03
|
||||
|
||||
---
|
||||
|
||||
## 一、OpenClaw 官方记忆(docs.openclaw.ai)
|
||||
|
||||
### 1.1 存储形态
|
||||
|
||||
- **纯 Markdown,文件为唯一真相源**;模型只「记住」写入磁盘的内容。
|
||||
- **两层**:
|
||||
- **长期**:`MEMORY.md`(可选),持久事实与决策。
|
||||
- **日志**:`memory/YYYY-MM-DD.md`,按日追加、会话启动时读「今天+昨天」。
|
||||
|
||||
### 1.2 工具
|
||||
|
||||
- **memory_get**:按文件/行范围精确读取。
|
||||
- **memory_search**:语义检索(基于索引片段);文件不存在时优雅降级返回空,不抛错。
|
||||
|
||||
### 1.3 何时写入
|
||||
|
||||
- 用户说「记住」→ 必须落盘,不留在内存。
|
||||
- 日常笔记、运行上下文 → `memory/YYYY-MM-DD.md`。
|
||||
- 决策、偏好、持久事实 → `MEMORY.md`。
|
||||
|
||||
### 1.4 自动 memory flush(compaction 前)
|
||||
|
||||
- 会话接近自动压缩时,触发**静默 agent 轮**,提醒模型在压缩前把该持久化的写入记忆。
|
||||
- 可配置:`memoryFlush.enabled`、`softThresholdTokens`、提示词(含 `NO_REPLY` 避免对用户可见)。
|
||||
|
||||
### 1.5 向量检索
|
||||
|
||||
- 可对 `MEMORY.md` 与 `memory/*.md` 建小向量索引,支持语义查询。
|
||||
- 默认按可用 Key 自动选择:Mistral / Voyage / Gemini / OpenAI / local;支持 Ollama、sqlite-vec 加速。
|
||||
- **QMD 后端(实验)**:`memory.backend = "qmd"`,本地 BM25 + 向量 + 重排,Markdown 仍为真相源,检索交给 QMD 侧车。
|
||||
|
||||
---
|
||||
|
||||
## 二、openclaw-memory-final(生产级,codesfly)
|
||||
|
||||
- **仓库**:[https://github.com/codesfly/openclaw-memory-final](https://github.com/codesfly/openclaw-memory-final)
|
||||
- **最新**:v0.4.2(2026-03),Node 22+,可选 QMD 索引。
|
||||
|
||||
### 2.1 设计目标
|
||||
|
||||
- **可靠性**:自愈、避免静默失败。
|
||||
- **幂等**:同一会话状态不重复写入日记忆块。
|
||||
- **成本可控**:避免不必要的向量 embed(每日 qmd update,每周才 qmd embed)。
|
||||
- **可审计**:决策与状态可追溯。
|
||||
|
||||
### 2.2 分层 Pipeline
|
||||
|
||||
|
||||
| 层级 | 说明 |
|
||||
| ---------------------- | ------------------------------------------------------------------------------------------------------------------- |
|
||||
| **短期工作台** | `memory/CURRENT_STATE.md`,当日工作台,compaction/重置后首选恢复点;可每轮覆盖(非仅追加)。建议字段:今日目标/进行中/阻塞/下一步≤3。 |
|
||||
| **日记忆日志** | `memory/YYYY-MM-DD.md`,主会话 + 子 Agent 产出经「每日蒸馏」写入,仅追加。 |
|
||||
| **任务结果卡** | `memory/tasks/YYYY-MM-DD.md`,**仅结果**的子 Agent 任务卡;主会话整理,子 Agent 原始过程保留在隔离历史供审计。 |
|
||||
| **每日蒸馏 Daily Sync** | 23:00 本地时间;近 26h 会话;过滤 <2 条用户消息的会话;写入日日志;**幂等游标**(last user message fingerprint)防重。 |
|
||||
| **每周精炼 Weekly Tidy** | 周日 22:00;合并近 7 日日志进长期记忆;约束 `MEMORY.md`(如 80 行/5KB);写周报 `memory/weekly/YYYY-MM-DD.md`;归档日日志到 `memory/archive/YYYY/`。 |
|
||||
| **Watchdog** | 每 2h 的 :15;检测禁用/陈旧/错误;**仅当连续 2 次异常才告警**(低噪)。 |
|
||||
| **Retrieval Watchdog** | 每 30 分钟;检索健康检查,2-hit 确认异常。 |
|
||||
| **QMD 夜间维护** | 每日 03:20;`qmd update`;仅当待处理积压超阈值时 `qmd embed`。 |
|
||||
|
||||
|
||||
### 2.3 多 Agent 记忆模型
|
||||
|
||||
- 主会话 = **记忆策展人**:整合持久结果与决策。
|
||||
- 子 Agent:只执行;原始过程留在隔离会话历史,可审计。
|
||||
- **共享交接层**:`memory/tasks/YYYY-MM-DD.md`,仅结果的任务卡。
|
||||
- **检索顺序**:先任务卡 → 再语义记忆搜索 → 最后按需钻取原始会话。
|
||||
|
||||
### 2.4 任务卡字段(建议)
|
||||
|
||||
- goal / boundary / acceptance / key actions / artifact paths / final status / next step
|
||||
|
||||
### 2.5 Context Budget + 动态画像
|
||||
|
||||
- `memory/context-profiles.json` 定义按画像的上下文来源。
|
||||
- 注入前**硬预算**:单文件上限(默认 20000 字符)、总上限(默认 80000 字符)。
|
||||
- 按优先级(如 main/ops/btc/quant)组 context pack,控制 prompt 膨胀与延迟。
|
||||
|
||||
### 2.6 冲突检测
|
||||
|
||||
- 持久化前扫描长期记忆中的路由/规则漂移,避免静默覆盖(`memory_conflict_check.py` → `memory-conflict-report.json`)。
|
||||
|
||||
### 2.7 状态文件(摘要)
|
||||
|
||||
- `processed-sessions.json`:幂等游标。
|
||||
- `memory-watchdog-state.json`:异常计数与 last3 快照。
|
||||
- `context-budget-state.json`:预算检查结果。
|
||||
- `memory-retrieval-watchdog-state.json`:检索健康状态。
|
||||
|
||||
---
|
||||
|
||||
## 三、卡若AI 记忆系统现状(简要)
|
||||
|
||||
|
||||
| 层级 | 位置 | 说明 |
|
||||
| ------- | -------------------------- | ----------------------------------------- |
|
||||
| **短期** | Cursor 对话上下文 | 单次对话有效 |
|
||||
| **长期** | `个人/1、卡若:本人/记忆.md` | 单文件,偏好/规则/人脉/原则 |
|
||||
| **结构化** | `水溪_整理归档/记忆系统/structured/` | 技能索引、Agent 成果、每日摘要、幂等游标、健康、watchdog 报告、周报 |
|
||||
|
||||
|
||||
**自动化**:`collect_chat_daily.py`(每日、幂等+脱敏)、`collect_daily.py`(每日摘要)、`weekly_optimize.py`(每周 SKILL 审计+经验整理)、`memory_watchdog.py`(每 2h、连续 2 次异常才告警)。
|
||||
|
||||
---
|
||||
|
||||
## 四、可借鉴点(卡若AI 学习清单)
|
||||
|
||||
|
||||
| OpenClaw / memory-final | 卡若AI 可借鉴 |
|
||||
| ----------------------------- | ------------------------------------------------------------------------- |
|
||||
| **CURRENT_STATE 短期工作台** | 可选:在 `记忆系统/` 或工作台增加「当日工作台」单文件(今日目标/进行中/阻塞/下一步≤3),compaction 或新会话时优先读。 |
|
||||
| **任务结果卡(仅结果)** | 多线程/子任务执行时,将每个子任务的结果写成「任务卡」到 `structured/tasks/YYYY-MM-DD.md`,检索时先任务卡再其他。 |
|
||||
| **检索顺序** | 明确:先查任务卡/结构化结果 → 再查长期记忆/经验库 → 最后按需查原始对话归档。 |
|
||||
| **Context budget** | 若未来做「记忆注入」到 prompt,先做单文件与总字符上限,避免一次性灌入过多记忆。 |
|
||||
| **Compaction 前 memory flush** | 若平台支持「压缩前回调」,可在压缩前提醒写入 `记忆.md` 或当日工作台。 |
|
||||
| **冲突检测** | 写入长期记忆或规则前,扫描已有规则/路由是否与新内容冲突,生成简单 conflict 报告。 |
|
||||
| **QMD / 向量检索** | 长期可选:对 `记忆.md` 或结构化摘要做本地向量索引(如 QMD 或现有嵌入),支持语义检索;当前卡若AI 以文件与关键词为主,可后续迭代。 |
|
||||
| **周报与归档** | 已有 weekly_report;可明确「周报 + 日日志归档到 archive/YYYY/」的目录规范,与 memory-final 对齐。 |
|
||||
|
||||
|
||||
---
|
||||
|
||||
## 五、参考链接
|
||||
|
||||
- OpenClaw Memory 概念:[https://docs.openclaw.ai/concepts/memory](https://docs.openclaw.ai/concepts/memory)
|
||||
- openclaw-memory-final:[https://github.com/codesfly/openclaw-memory-final](https://github.com/codesfly/openclaw-memory-final)
|
||||
- openclaw-memory-final 架构:[https://github.com/codesfly/openclaw-memory-final/blob/main/docs/architecture.md](https://github.com/codesfly/openclaw-memory-final/blob/main/docs/architecture.md)
|
||||
- 卡若AI 记忆系统:`02_卡人(水)/水溪_整理归档/记忆系统/README.md`
|
||||
|
||||
@@ -303,3 +303,4 @@
|
||||
| 2026-03-11 20:12:14 | 🔄 卡若AI 同步 2026-03-11 20:12 | 更新:水桥平台对接、水溪整理归档、运营中枢工作台 | 排除 >20MB: 11 个 |
|
||||
| 2026-03-11 20:22:53 | 🔄 卡若AI 同步 2026-03-11 20:22 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 |
|
||||
| 2026-03-11 20:48:52 | 🔄 卡若AI 同步 2026-03-11 20:48 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 |
|
||||
| 2026-03-11 21:09:46 | 🔄 卡若AI 同步 2026-03-11 21:09 | 更新:运营中枢工作台 | 排除 >20MB: 11 个 |
|
||||
|
||||
@@ -306,3 +306,4 @@
|
||||
| 2026-03-11 20:12:14 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-11 20:12 | 更新:水桥平台对接、水溪整理归档、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
|
||||
| 2026-03-11 20:22:53 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-11 20:22 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
|
||||
| 2026-03-11 20:48:52 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-11 20:48 | 更新:水桥平台对接、运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
|
||||
| 2026-03-11 21:09:46 | 成功 | 成功 | 🔄 卡若AI 同步 2026-03-11 21:09 | 更新:运营中枢工作台 | 排除 >20MB: 11 个 | [仓库](http://open.quwanzhi.com:3000/fnvtk/karuo-ai) [百科](http://open.quwanzhi.com:3000/fnvtk/karuo-ai/wiki) |
|
||||
|
||||
Reference in New Issue
Block a user