first commit

This commit is contained in:
ytc1012
2026-02-04 16:11:55 +08:00
commit 0f3ee050dc
165 changed files with 25795 additions and 0 deletions

View File

@@ -0,0 +1 @@
# Tools package

View File

@@ -0,0 +1,360 @@
#!/usr/bin/env python3
"""
Postmortem 匹配检查脚本
检查当前变更是否触发历史 postmortem
使用方法:
python tools/postmortem_check.py [--base main] [--mode warn|block]
返回码:
0 - 无匹配或仅警告
1 - 发现高置信度匹配blocking mode
2 - 错误
"""
import argparse
import fnmatch
import re
import subprocess
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import yaml
# Repository-level directory holding the PM-*.yaml postmortem records.
POSTMORTEM_DIR = Path(__file__).parent.parent / "postmortem"
@dataclass
class MatchResult:
    """A single trigger hit linking one postmortem to the current change."""

    pm_id: str  # id of the matched postmortem (e.g. "PM-2025-001")
    reason: str  # human-readable reason; prefixed "File:", "Function:", "Pattern:" or "Keyword:"
    confidence: float  # base confidence weight assigned by the matcher
@dataclass
class AggregatedMatch:
    """Per-postmortem aggregation of all MatchResult hits."""

    pm_id: str  # postmortem id this aggregate belongs to
    reasons: List[str] = field(default_factory=list)  # all hit reasons, in match order
    max_confidence: float = 0.0  # strongest single-hit confidence
    match_count: int = 0  # number of individual hits
    final_confidence: float = 0.0  # combined score, set by aggregate_matches()
    has_file_match: bool = False  # at least one file-trigger hit
    has_content_match: bool = False  # at least one function/pattern-trigger hit
class PostmortemMatcher:
    """Matches a change set (file list + diff text) against stored postmortems.

    Each trigger hit yields a MatchResult whose base confidence comes from
    the class weights below; aggregate_matches() then folds all hits for one
    postmortem into a single final confidence score.
    """

    # Base confidence per trigger kind.
    WEIGHT_FILE_EXACT = 0.8
    WEIGHT_FILE_PATTERN = 0.6
    WEIGHT_FUNCTION = 0.7
    WEIGHT_PATTERN = 0.5
    WEIGHT_KEYWORD = 0.4

    def __init__(self):
        self.postmortems = self._load_all_postmortems()

    def _load_all_postmortems(self) -> List[Dict]:
        """Load every PM-*.yaml under POSTMORTEM_DIR, skipping unreadable files."""
        pms = []
        if not POSTMORTEM_DIR.exists():
            return pms
        for f in POSTMORTEM_DIR.glob("PM-*.yaml"):
            try:
                with open(f, encoding="utf-8") as fp:
                    pm = yaml.safe_load(fp)
                    if pm:
                        pms.append(pm)
            except Exception as e:
                # Best-effort loading: a broken record must not kill the check.
                print(f"Warning: Failed to load {f}: {e}", file=sys.stderr)
        return pms

    def match_files(self, changed_files: List[str]) -> List[MatchResult]:
        """Match changed file paths against each postmortem's file triggers."""
        matches = []
        for pm in self.postmortems:
            triggers = pm.get("triggers", {})
            pm_files = triggers.get("files", [])
            pm_id = pm.get("id", "unknown")
            for changed in changed_files:
                for pattern in pm_files:
                    if self._file_matches(changed, pattern):
                        # An exact path is stronger evidence than a glob pattern.
                        is_exact = "*" not in pattern and "?" not in pattern
                        confidence = (
                            self.WEIGHT_FILE_EXACT if is_exact else self.WEIGHT_FILE_PATTERN
                        )
                        matches.append(
                            MatchResult(
                                pm_id=pm_id,
                                reason=f"File: {changed} ~ {pattern}",
                                confidence=confidence,
                            )
                        )
        return matches

    def match_diff_content(self, diff: str) -> List[MatchResult]:
        """Match function names, regex patterns, and keywords against diff text."""
        matches = []
        # Lowercase once up front: the diff may be large and keywords are
        # checked once per postmortem (previously lowercased per keyword).
        diff_lower = diff.lower()
        for pm in self.postmortems:
            triggers = pm.get("triggers", {})
            pm_id = pm.get("id", "unknown")
            # Function-name triggers (whole-word match, literal name).
            for func in triggers.get("functions", []):
                if not func:
                    continue
                try:
                    if re.search(rf"\b{re.escape(func)}\b", diff):
                        matches.append(
                            MatchResult(
                                pm_id=pm_id,
                                reason=f"Function: {func}",
                                confidence=self.WEIGHT_FUNCTION,
                            )
                        )
                except re.error:
                    continue
            # Regex pattern triggers (case-insensitive, pattern comes from YAML).
            for pattern in triggers.get("patterns", []):
                if not pattern:
                    continue
                try:
                    if re.search(pattern, diff, re.IGNORECASE):
                        matches.append(
                            MatchResult(
                                pm_id=pm_id,
                                reason=f"Pattern: {pattern}",
                                confidence=self.WEIGHT_PATTERN,
                            )
                        )
                except re.error:
                    # User-supplied pattern may be invalid; skip it silently.
                    continue
            # Plain keyword triggers (case-insensitive substring).
            for keyword in triggers.get("keywords", []):
                if not keyword:
                    continue
                if keyword.lower() in diff_lower:
                    matches.append(
                        MatchResult(
                            pm_id=pm_id,
                            reason=f"Keyword: {keyword}",
                            confidence=self.WEIGHT_KEYWORD,
                        )
                    )
        return matches

    def _file_matches(self, filepath: str, pattern: str) -> bool:
        """True if *filepath* matches *pattern* (glob, basename glob, or substring)."""
        # Glob match against the full path.
        if fnmatch.fnmatch(filepath, pattern):
            return True
        # Glob match against the basename only.
        if fnmatch.fnmatch(Path(filepath).name, pattern):
            return True
        # Literal (non-glob) patterns also match as a plain path substring.
        if "*" not in pattern and "?" not in pattern:
            return pattern in filepath
        return False

    def aggregate_matches(
        self, file_matches: List[MatchResult], content_matches: List[MatchResult]
    ) -> Dict[str, AggregatedMatch]:
        """Fold individual hits into one AggregatedMatch per postmortem id.

        Final confidence = strongest single hit, plus 0.1 per extra hit
        (capped at +0.3), plus 0.1 when file and content evidence agree;
        clamped to 1.0.
        """
        result: Dict[str, AggregatedMatch] = {}
        for match in file_matches + content_matches:
            if match.pm_id not in result:
                result[match.pm_id] = AggregatedMatch(pm_id=match.pm_id)
            agg = result[match.pm_id]
            agg.reasons.append(match.reason)
            agg.max_confidence = max(agg.max_confidence, match.confidence)
            agg.match_count += 1
            # Reasons are prefixed "File:", "Function:", "Pattern:", "Keyword:".
            if "File" in match.reason:
                agg.has_file_match = True
            if "Function" in match.reason or "Pattern" in match.reason:
                agg.has_content_match = True
        # Compute the combined score (keys were unused in this loop before).
        for agg in result.values():
            base = agg.max_confidence
            count_bonus = min(0.3, 0.1 * (agg.match_count - 1))
            cross_bonus = 0.1 if (agg.has_file_match and agg.has_content_match) else 0
            agg.final_confidence = min(1.0, base + count_bonus + cross_bonus)
        return result

    def get_postmortem_details(self, pm_id: str) -> Optional[Dict]:
        """Return the full postmortem record for *pm_id*, or None if unknown."""
        for pm in self.postmortems:
            if pm.get("id") == pm_id:
                return pm
        return None
def get_changed_files(base_ref: str) -> List[str]:
    """Return the list of files changed relative to *base_ref*.

    Tries three diff forms in order — three-dot (PR-style), two-dot, then a
    plain diff against the ref — and keeps the first one git accepts.
    """
    repo_root = POSTMORTEM_DIR.parent
    for diff_spec in (f"{base_ref}...HEAD", f"{base_ref}..HEAD", base_ref):
        proc = subprocess.run(
            ["git", "diff", "--name-only", diff_spec],
            capture_output=True,
            text=True,
            cwd=repo_root,
        )
        if proc.returncode == 0:
            break
    return [name for name in proc.stdout.strip().split("\n") if name]
def get_diff_content(base_ref: str) -> str:
    """Return the unified diff text relative to *base_ref*, code files only.

    Same three-stage fallback as get_changed_files (three-dot, two-dot,
    plain); the diff is restricted to common code extensions.
    """
    repo_root = POSTMORTEM_DIR.parent
    code_globs = ["*.py", "*.js", "*.ts", "*.jsx", "*.tsx"]
    for diff_spec in (f"{base_ref}...HEAD", f"{base_ref}..HEAD", base_ref):
        proc = subprocess.run(
            ["git", "diff", diff_spec, "--"] + code_globs,
            capture_output=True,
            text=True,
            cwd=repo_root,
        )
        if proc.returncode == 0:
            break
    return proc.stdout
def main():
    """Diff against --base, match postmortems, print a report, set exit code.

    Exit codes: 0 = no matches or warn-only mode; 1 = at least one match at
    or above --threshold while --mode block.
    """
    parser = argparse.ArgumentParser(description="Postmortem Check")
    parser.add_argument("--base", default="main", help="Base branch/commit")
    parser.add_argument(
        "--mode",
        choices=["warn", "block"],
        default="warn",
        help="warn: only print, block: exit 1 on high confidence",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.7,
        help="Confidence threshold for blocking (default: 0.7)",
    )
    # NOTE(review): --output json is accepted but no json branch exists below;
    # output is always text. Confirm whether json output is still planned.
    parser.add_argument(
        "--output",
        choices=["text", "json"],
        default="text",
        help="Output format",
    )
    args = parser.parse_args()
    if not POSTMORTEM_DIR.exists():
        print("No postmortem directory found. Run postmortem_init.py first.")
        sys.exit(0)
    matcher = PostmortemMatcher()
    if not matcher.postmortems:
        print("No postmortems found.")
        sys.exit(0)
    # Collect the change set relative to the base ref.
    changed_files = get_changed_files(args.base)
    diff_content = get_diff_content(args.base)
    if not changed_files:
        print("No changes detected.")
        sys.exit(0)
    print(f"Checking {len(changed_files)} changed files against {len(matcher.postmortems)} postmortems...")
    # Run file-level and content-level matching.
    file_matches = matcher.match_files(changed_files)
    content_matches = matcher.match_diff_content(diff_content)
    # Fold the hits into one confidence score per postmortem.
    results = matcher.aggregate_matches(file_matches, content_matches)
    if not results:
        print("No postmortem matches found.")
        sys.exit(0)
    # Report, highest confidence first.
    blocking = []
    warnings = []
    sorted_results = sorted(
        results.items(), key=lambda x: x[1].final_confidence, reverse=True
    )
    for pm_id, agg in sorted_results:
        confidence = agg.final_confidence
        is_blocking = confidence >= args.threshold
        level = "BLOCK" if is_blocking else "WARN"
        # Pull the full postmortem record for display.
        pm = matcher.get_postmortem_details(pm_id)
        if not pm:
            pm = {"title": "Unknown", "severity": "unknown"}
        print(f"\n[{level}] {pm_id} ({confidence:.0%} confidence)")
        print(f" Title: {pm.get('title', 'N/A')}")
        print(f" Severity: {pm.get('severity', 'N/A')}")
        print(f" Reasons:")
        # Cap the list at five reasons to keep the report readable.
        for reason in agg.reasons[:5]:
            print(f" - {reason}")
        if pm.get("verification"):
            print(f" Verification checklist:")
            for check in pm["verification"]:
                print(f" [ ] {check}")
        if is_blocking:
            blocking.append(pm_id)
        else:
            warnings.append(pm_id)
    # Summary footer.
    print(f"\n{'=' * 50}")
    print(f"Summary: {len(blocking)} blocking, {len(warnings)} warnings")
    # Exit code: only block mode turns high-confidence matches into failures.
    if blocking and args.mode == "block":
        print(f"\n{len(blocking)} postmortem(s) triggered with high confidence.")
        print("Please review the changes and verify they don't reintroduce past issues.")
        sys.exit(1)
    sys.exit(0)
# Script entry point.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,316 @@
#!/usr/bin/env python3
"""
为单个 fix commit 生成 postmortem
使用方法:
python tools/postmortem_generate.py --commit abc1234 [--output postmortem/]
用于 GitHub Actions 中自动为新的 fix commit 生成 postmortem。
"""
import argparse
import asyncio
import json
import re
import subprocess
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Optional
import yaml
# Make the project root importable (for the optional app.llm dependency).
sys.path.insert(0, str(Path(__file__).parent.parent))
# Repository-level directory holding the PM-*.yaml postmortem records.
POSTMORTEM_DIR = Path(__file__).parent.parent / "postmortem"
def is_fix_commit(commit_hash: str) -> bool:
    """True when the commit subject starts with "fix" (case-insensitive)."""
    proc = subprocess.run(
        ["git", "log", "-1", "--format=%s", commit_hash],
        capture_output=True,
        text=True,
        cwd=POSTMORTEM_DIR.parent,
    )
    return proc.stdout.strip().lower().startswith("fix")
def get_commit_info(commit_hash: str) -> Dict:
    """Collect subject, body, author date, file list, and a capped diff."""
    repo_root = POSTMORTEM_DIR.parent

    def run_git(*args: str) -> str:
        # One git invocation; stdout only, errors surface as empty output.
        return subprocess.run(
            ["git", *args], capture_output=True, text=True, cwd=repo_root
        ).stdout

    subject = run_git("log", "-1", "--format=%s", commit_hash).strip()
    body = run_git("log", "-1", "--format=%b", commit_hash).strip()
    # %aI = author date in strict ISO 8601.
    date = run_git("log", "-1", "--format=%aI", commit_hash).strip()
    raw_names = run_git("show", commit_hash, "--name-only", "--format=")
    files = [name for name in raw_names.strip().split("\n") if name]
    # Python-only diff, capped at 6 KB to keep downstream prompts small.
    diff = run_git("show", commit_hash, "-p", "--", "*.py")[:6000]
    return {
        "hash": commit_hash[:7],
        "full_hash": commit_hash,
        "subject": subject,
        "body": body,
        "date": date,
        "files": files,
        "diff": diff,
    }
def get_next_pm_id(output_dir: Path) -> str:
    """Return the next sequential postmortem id for the current year.

    Ids look like PM-<year>-NNN; numbering continues from the highest
    existing file in *output_dir* (created if missing).
    """
    current_year = datetime.now().year
    output_dir.mkdir(exist_ok=True)
    used_numbers = [
        int(path.stem.split("-")[-1])
        for path in output_dir.glob(f"PM-{current_year}-*.yaml")
    ]
    next_number = max(used_numbers, default=0) + 1
    return f"PM-{current_year}-{next_number:03d}"
def parse_llm_response(response: str) -> Dict:
    """Parse an LLM reply into a dict, tolerating markdown fences and noise."""
    failed = object()  # sentinel: json.loads may legitimately return None/lists

    def attempt(text: str):
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            return failed

    # 1) The raw response may already be valid JSON.
    parsed = attempt(response)
    if parsed is not failed:
        return parsed
    # 2) Strip markdown code fences and retry.
    cleaned = re.sub(r"^```(?:json)?\s*", "", response, flags=re.MULTILINE)
    cleaned = re.sub(r"\s*```$", "", cleaned, flags=re.MULTILINE)
    cleaned = cleaned.strip()
    parsed = attempt(cleaned)
    if parsed is not failed:
        return parsed
    # 3) Last resort: parse the outermost {...} span, if any.
    brace_span = re.search(r"\{[\s\S]*\}", cleaned)
    if brace_span:
        parsed = attempt(brace_span.group())
        if parsed is not failed:
            return parsed
    return {"title": "Parse failed", "severity": "low", "tags": ["parse-error"]}
def extract_from_commit(info: Dict) -> Dict:
    """Build a postmortem dict from commit metadata alone (no-LLM fallback).

    Args:
        info: Commit mapping with optional "subject", "body", "files", "diff".

    Returns:
        A postmortem-shaped dict (title, triggers, fix_pattern, ...).
    """
    subject = info.get("subject", "")
    body = info.get("body", "")
    files = info.get("files", [])
    diff = info.get("diff", "")
    # Conventional-commit scope becomes the tag: "fix(api): ..." -> ["api"].
    scope_match = re.search(r"fix\((\w+)\)", subject, re.IGNORECASE)
    tags = [scope_match.group(1)] if scope_match else []
    # Strip the "fix(scope):" prefix for a readable title.
    title = re.sub(r"^fix(\([^)]+\))?:\s*", "", subject, flags=re.IGNORECASE)
    # Function names defined in the diff. dict.fromkeys dedupes while keeping
    # first-seen order; list(set(...)) made the output order nondeterministic
    # across interpreter runs due to string hash randomization.
    func_matches = re.findall(r"def\s+(\w+)\s*\(", diff)
    functions = list(dict.fromkeys(func_matches))[:5]
    return {
        "title": title[:50] if title else "Fix commit",
        "description": body[:300] if body else subject,
        "root_cause": "See commit body for details",
        "severity": "medium",
        "triggers": {
            "files": files[:5],
            "functions": functions,
            "patterns": [],
            "keywords": tags or ["general"],
        },
        "fix_pattern": {
            "approach": title,
            "key_changes": [title],
        },
        "verification": ["Review related code changes"],
        "tags": tags or ["general"],
    }
async def generate_with_llm(info: Dict) -> Dict:
    """Generate a postmortem dict for a commit via the project LLM.

    Falls back to the rule-based extract_from_commit() whenever the LLM
    cannot be imported, initialized, or called.
    """
    try:
        from app.llm import LLM
        llm = LLM()
    except ImportError as e:
        print(f"Warning: Cannot import LLM module: {e}")
        return extract_from_commit(info)
    except Exception as e:
        print(f"Warning: LLM init failed: {e}")
        return extract_from_commit(info)
    # Prompt text is consumed by the LLM at runtime — kept verbatim.
    prompt = f"""分析这个 fix commit生成 postmortem JSON
Commit: {info['subject']}
Body: {info['body'][:1000]}
Files: {', '.join(info['files'][:10])}
Diff preview: {info['diff'][:2500]}
返回 JSON 格式:
{{
"title": "简短标题(中文)",
"description": "问题描述2-3句话",
"root_cause": "根因分析",
"severity": "medium",
"triggers": {{
"files": ["相关文件模式"],
"functions": ["相关函数名"],
"patterns": ["正则模式"],
"keywords": ["关键词"]
}},
"fix_pattern": {{
"approach": "修复方法",
"key_changes": ["关键变更"]
}},
"verification": ["验证点"],
"tags": ["标签"]
}}
只返回 JSON不要其他文字。"""
    try:
        response = await llm.ask(
            messages=[{"role": "user", "content": prompt}],
            stream=False,
            temperature=0.2,
        )
        return parse_llm_response(response)
    except Exception as e:
        print(f"Warning: LLM call failed: {e}")
        return extract_from_commit(info)
def save_postmortem(data: Dict, info: Dict, pm_id: str, output_dir: Path) -> Path:
    """Serialize the postmortem record to <output_dir>/<pm_id>.yaml and return its path."""
    output_dir.mkdir(exist_ok=True)
    # Guard against an LLM returning "triggers" in a non-dict shape.
    trigger_data = data.get("triggers", {})
    if not isinstance(trigger_data, dict):
        trigger_data = {}
    record = {
        "id": pm_id,
        "created_at": datetime.now(timezone.utc).isoformat(),
        "source_commit": info["hash"],
        "severity": data.get("severity", "medium"),
        "title": data.get("title", "Untitled"),
        "description": data.get("description", ""),
        "root_cause": data.get("root_cause", ""),
        "triggers": {
            "files": trigger_data.get("files", info.get("files", [])[:5]),
            "functions": trigger_data.get("functions", []),
            "patterns": trigger_data.get("patterns", []),
            "keywords": trigger_data.get("keywords", []),
        },
        "fix_pattern": data.get("fix_pattern", {}),
        "verification": data.get("verification", []),
        "related": {
            "files_changed": info.get("files", []),
        },
        "tags": data.get("tags", []),
    }
    target = output_dir / f"{pm_id}.yaml"
    with open(target, "w", encoding="utf-8") as handle:
        yaml.dump(record, handle, allow_unicode=True, default_flow_style=False, sort_keys=False)
    return target
def check_duplicate(info: Dict, output_dir: Path) -> Optional[str]:
    """Return the filename of an existing postmortem for this commit, if any."""
    if not output_dir.exists():
        return None
    target_hash = info["hash"]
    for candidate in output_dir.glob("PM-*.yaml"):
        try:
            with open(candidate, encoding="utf-8") as handle:
                record = yaml.safe_load(handle)
            # Records are matched by the short source-commit hash.
            if record and record.get("source_commit") == target_hash:
                return candidate.name
        except Exception:
            # Unreadable/odd-shaped records never block generation.
            continue
    return None
async def main():
    """CLI entry point: generate one postmortem for a single fix commit."""
    parser = argparse.ArgumentParser(description="Generate postmortem for a fix commit")
    parser.add_argument("--commit", required=True, help="Commit hash")
    parser.add_argument("--output", default="postmortem", help="Output directory")
    parser.add_argument(
        "--force",
        action="store_true",
        help="Force generation even if not a fix commit",
    )
    parser.add_argument(
        "--no-llm",
        action="store_true",
        help="Skip LLM, use rule-based extraction only",
    )
    args = parser.parse_args()
    output_dir = Path(args.output)
    if not output_dir.is_absolute():
        output_dir = POSTMORTEM_DIR.parent / args.output
    # Only fix commits get postmortems unless --force is given.
    if not args.force and not is_fix_commit(args.commit):
        print(f"Commit {args.commit} is not a fix commit. Skipping.")
        print("Use --force to generate anyway.")
        return
    # Gather subject/body/date/files/diff for the commit.
    info = get_commit_info(args.commit)
    print(f"Processing: {info['subject'][:60]}")
    # Skip if a postmortem for this commit already exists.
    existing = check_duplicate(info, output_dir)
    if existing:
        print(f"Postmortem already exists: {existing}")
        return
    # Generate the postmortem content (LLM with rule-based fallback).
    if args.no_llm:
        data = extract_from_commit(info)
    else:
        data = await generate_with_llm(info)
    # Persist to YAML.
    pm_id = get_next_pm_id(output_dir)
    filepath = save_postmortem(data, info, pm_id, output_dir)
    print(f"Generated: {filepath}")
# Script entry point (async CLI).
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,357 @@
#!/usr/bin/env python3
"""
Postmortem Onboarding 脚本
分析历史 fix commits生成初始 postmortem 集合
使用方法:
python tools/postmortem_init.py [--since 2025-06-01] [--limit 50] [--dry-run]
"""
import argparse
import asyncio
import json
import re
import subprocess
import sys
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, Optional

import yaml
# Make the project root importable (for the optional app.llm dependency).
sys.path.insert(0, str(Path(__file__).parent.parent))
# Repository-level directory holding the PM-*.yaml postmortem records.
POSTMORTEM_DIR = Path(__file__).parent.parent / "postmortem"
def get_fix_commits(since: Optional[str] = None, limit: int = 100) -> List[Dict]:
    """List commits (all refs) whose subject starts with "fix", newest first.

    Args:
        since: Optional lower date bound understood by `git log --since`.
        limit: Maximum number of commits returned.
    """
    git_args = [
        "git",
        "log",
        "--grep=^fix",
        "-i",
        "--all",
        "--format=%H|%s|%aI",
    ]
    if since:
        git_args.extend(["--since", since])
    proc = subprocess.run(
        git_args, capture_output=True, text=True, cwd=POSTMORTEM_DIR.parent
    )
    commits: List[Dict] = []
    for line in proc.stdout.strip().split("\n"):
        if not line:
            continue
        # "<hash>|<subject>|<iso-date>"; maxsplit keeps '|' inside subjects safe.
        fields = line.split("|", 2)
        if len(fields) >= 3:
            commit_hash, subject, date = fields
            commits.append({"hash": commit_hash, "subject": subject, "date": date})
    return commits[:limit]
def get_commit_details(commit_hash: str) -> Dict:
    """Fetch body, changed-file list, and a size-capped Python diff for a commit."""
    repo_root = POSTMORTEM_DIR.parent

    def run_git(args: List[str]) -> str:
        return subprocess.run(
            args, capture_output=True, text=True, cwd=repo_root
        ).stdout

    body = run_git(["git", "log", "-1", "--format=%b", commit_hash]).strip()
    raw_names = run_git(["git", "show", commit_hash, "--name-only", "--format="])
    files = [name for name in raw_names.strip().split("\n") if name]
    # Diff is restricted to .py files and capped at 6 KB.
    diff = run_git(["git", "show", commit_hash, "--stat", "-p", "--", "*.py"])[:6000]
    return {"body": body, "files": files, "diff": diff}
def assess_commit_quality(commit: Dict, details: Dict) -> float:
    """Score (0..1) how postmortem-worthy a commit message is.

    Additive heuristic: detailed body (two tiers), problem-description
    keywords, structured formatting, a conventional-commit scope, and
    touching known-important files.
    """
    body = details.get("body", "")
    subject = commit.get("subject", "")
    body_lower = body.lower()
    score = 0.0
    # Detailed bodies earn up to 0.5.
    if len(body) > 50:
        score += 0.3
    if len(body) > 150:
        score += 0.2
    # Mentions of a problem/cause/fix (Chinese or English).
    problem_keywords = ("问题", "原因", "修复", "bug", "error", "issue", "cause", "fix")
    if any(kw in body_lower for kw in problem_keywords):
        score += 0.2
    # Structured formatting (headings, bullets, numbering).
    if any(marker in body for marker in ("##", "- ", "1.", "*")):
        score += 0.1
    # A conventional-commit scope like "fix(api): ...".
    if "(" in subject and ")" in subject:
        score += 0.1
    # Changes to known-important parts of the codebase.
    important_patterns = ("recommender", "api/index", "llm", "config")
    touched = details.get("files", [])
    if any(any(pat in name for pat in important_patterns) for name in touched):
        score += 0.1
    return min(1.0, score)
def parse_llm_response(response: str) -> Dict:
    """Robustly parse an LLM reply into a dict (fences/noise tolerated)."""
    failed = object()  # sentinel: json.loads may legitimately return None/lists

    def attempt(text: str):
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            return failed

    # 1) The raw response may already be valid JSON.
    parsed = attempt(response)
    if parsed is not failed:
        return parsed
    # 2) Strip markdown code fences and retry.
    cleaned = re.sub(r"^```(?:json)?\s*", "", response, flags=re.MULTILINE)
    cleaned = re.sub(r"\s*```$", "", cleaned, flags=re.MULTILINE)
    cleaned = cleaned.strip()
    parsed = attempt(cleaned)
    if parsed is not failed:
        return parsed
    # 3) Last resort: parse the outermost {...} span, if any.
    brace_span = re.search(r"\{[\s\S]*\}", cleaned)
    if brace_span:
        parsed = attempt(brace_span.group())
        if parsed is not failed:
            return parsed
    # Fallback record so the pipeline keeps moving.
    return {
        "title": "解析失败",
        "description": response[:500],
        "severity": "low",
        "tags": ["parse-failed"],
    }
async def generate_postmortem_with_llm(commit: Dict, details: Dict) -> Dict:
    """Generate a postmortem dict for a historical commit via the project LLM.

    Falls back to the rule-based extract_from_commit() whenever the LLM
    cannot be imported, initialized, or called.
    """
    try:
        from app.llm import LLM
        llm = LLM()
    except ImportError as e:
        print(f" Warning: Cannot import LLM module: {e}")
        return extract_from_commit(commit, details)
    except Exception as e:
        print(f" Warning: LLM init failed: {e}")
        return extract_from_commit(commit, details)
    # Prompt text is consumed by the LLM at runtime — kept verbatim.
    prompt = f"""分析以下 git fix commit生成一个 postmortem 条目。
Commit 信息:
- Subject: {commit['subject']}
- Date: {commit['date']}
- Body: {details.get('body', '(无)')[:1000]}
修改的文件:
{chr(10).join(details['files'][:15])}
代码变更摘要:
{details['diff'][:3000]}
请生成 JSON 格式的 postmortem包含以下字段
1. title: 简短标题中文10-30字
2. description: 问题描述2-3句话描述问题现象和影响
3. root_cause: 根因分析1-2句话
4. severity: critical/high/medium/low根据影响范围判断
5. triggers: 对象,包含:
- files: 相关文件模式列表(如 "app/tool/*.py"
- functions: 相关函数名列表(从 diff 中提取)
- patterns: 正则匹配模式列表(用于匹配未来的 diff 内容)
- keywords: 关键词列表(中英文都可以)
6. fix_pattern: 对象,包含:
- approach: 修复方法描述
- key_changes: 关键变更点列表
7. verification: 验证检查点列表(未来修改相关代码时应检查的事项)
8. tags: 标签列表(用于分类,如 geocoding, ui, api 等)
只返回 JSON不要其他文字。"""
    try:
        response = await llm.ask(
            messages=[{"role": "user", "content": prompt}],
            stream=False,
            temperature=0.2,
        )
        return parse_llm_response(response)
    except Exception as e:
        print(f" Warning: LLM call failed: {e}")
        return extract_from_commit(commit, details)
def extract_from_commit(commit: Dict, details: Dict) -> Dict:
    """Build a postmortem dict from commit metadata alone (no-LLM fallback).

    Args:
        commit: Mapping with at least "subject".
        details: Mapping with optional "body", "files", "diff".

    Returns:
        A postmortem-shaped dict (title, triggers, fix_pattern, ...).
    """
    subject = commit.get("subject", "")
    body = details.get("body", "")
    files = details.get("files", [])
    # Conventional-commit scope becomes the tag: "fix(api): ..." -> ["api"].
    scope_match = re.search(r"fix\((\w+)\)", subject, re.IGNORECASE)
    tags = [scope_match.group(1)] if scope_match else []
    # Strip the "fix(scope):" prefix for a readable title.
    title = re.sub(r"^fix(\([^)]+\))?:\s*", "", subject, flags=re.IGNORECASE)
    # Function names defined in the diff. dict.fromkeys dedupes while keeping
    # first-seen order; list(set(...)) made the output order nondeterministic
    # across interpreter runs due to string hash randomization.
    diff = details.get("diff", "")
    func_matches = re.findall(r"def\s+(\w+)\s*\(", diff)
    functions = list(dict.fromkeys(func_matches))[:5]
    return {
        "title": title[:50] if title else "Fix commit",
        "description": body[:300] if body else subject,
        "root_cause": "See commit body for details",
        "severity": "medium",
        "triggers": {
            "files": files[:5],
            "functions": functions,
            "patterns": [],
            "keywords": tags or ["general"],
        },
        "fix_pattern": {
            "approach": title,
            "key_changes": [title],
        },
        "verification": ["Review related code changes"],
        "tags": tags or ["general"],
    }
def get_next_pm_id(year: int) -> str:
    """Return the next sequential postmortem id for *year* (PM-<year>-NNN)."""
    POSTMORTEM_DIR.mkdir(exist_ok=True)
    used_numbers = [
        int(path.stem.split("-")[-1])
        for path in POSTMORTEM_DIR.glob(f"PM-{year}-*.yaml")
    ]
    next_number = max(used_numbers, default=0) + 1
    return f"PM-{year}-{next_number:03d}"
def save_postmortem(pm_data: Dict, commit: Dict, details: Dict, pm_id: str) -> Path:
    """Write the postmortem record to POSTMORTEM_DIR/<pm_id>.yaml.

    Args:
        pm_data: Generated postmortem content (LLM or rule-based).
        commit: Commit mapping with at least "hash".
        details: Commit details with optional "files".
        pm_id: Id of the form PM-<year>-NNN, used as the filename stem.

    Returns:
        Path of the written YAML file.
    """
    POSTMORTEM_DIR.mkdir(exist_ok=True)
    # Guard against an LLM returning "triggers" in a non-dict shape.
    triggers = pm_data.get("triggers", {})
    if not isinstance(triggers, dict):
        triggers = {}
    output = {
        "id": pm_id,
        # Timezone-aware UTC timestamp: datetime.utcnow() is deprecated, and
        # this matches the created_at format used by postmortem_generate.py.
        "created_at": datetime.now(timezone.utc).isoformat(),
        "source_commit": commit["hash"][:7],
        "severity": pm_data.get("severity", "medium"),
        "title": pm_data.get("title", "Untitled"),
        "description": pm_data.get("description", ""),
        "root_cause": pm_data.get("root_cause", ""),
        "triggers": {
            "files": triggers.get("files", details.get("files", [])[:5]),
            "functions": triggers.get("functions", []),
            "patterns": triggers.get("patterns", []),
            "keywords": triggers.get("keywords", []),
        },
        "fix_pattern": pm_data.get("fix_pattern", {}),
        "verification": pm_data.get("verification", []),
        "related": {
            "files_changed": details.get("files", []),
        },
        "tags": pm_data.get("tags", []),
    }
    filepath = POSTMORTEM_DIR / f"{pm_id}.yaml"
    with open(filepath, "w", encoding="utf-8") as f:
        yaml.dump(
            output, f, allow_unicode=True, default_flow_style=False, sort_keys=False
        )
    return filepath
async def main():
    """CLI entry point: batch-generate postmortems from historical fix commits."""
    parser = argparse.ArgumentParser(description="Postmortem Onboarding")
    parser.add_argument("--since", help="Start date (YYYY-MM-DD)")
    parser.add_argument("--limit", type=int, default=50, help="Max commits to process")
    parser.add_argument("--dry-run", action="store_true", help="Preview only, no generation")
    parser.add_argument(
        "--min-quality",
        type=float,
        default=0.2,
        help="Minimum quality score to generate postmortem",
    )
    args = parser.parse_args()
    print("Fetching fix commits...")
    commits = get_fix_commits(since=args.since, limit=args.limit)
    print(f"Found {len(commits)} fix commits")
    if not commits:
        print("No fix commits found.")
        return
    generated = 0
    skipped = 0
    for i, commit in enumerate(commits):
        print(f"\n[{i + 1}/{len(commits)}] {commit['hash'][:7]}: {commit['subject'][:60]}")
        details = get_commit_details(commit["hash"])
        # Low-information commit messages are skipped via the quality gate.
        quality = assess_commit_quality(commit, details)
        print(f" Quality: {quality:.2f}, Files: {len(details['files'])}")
        if quality < args.min_quality:
            print(f" Skipped: quality below threshold ({args.min_quality})")
            skipped += 1
            continue
        if args.dry_run:
            print(" [DRY-RUN] Would generate postmortem")
            continue
        # Generate the postmortem content (LLM with rule-based fallback).
        pm_data = await generate_postmortem_with_llm(commit, details)
        # Ids are numbered within the year of the commit's author date.
        year = int(commit["date"][:4])
        pm_id = get_next_pm_id(year)
        filepath = save_postmortem(pm_data, commit, details, pm_id)
        print(f" Saved: {filepath}")
        generated += 1
    print(f"\n{'=' * 50}")
    print(f"Summary: Generated {generated}, Skipped {skipped}")
    if generated > 0:
        print(f"Postmortems saved to: {POSTMORTEM_DIR}/")
# Script entry point (async CLI).
if __name__ == "__main__":
    asyncio.run(main())

View File

@@ -0,0 +1,216 @@
"""
WCAG 2.1色彩对比度验证工具
验证所有设计token中的颜色组合是否符合WCAG标准:
- AA级 (正文): 对比度 ≥ 4.5:1
- AA级 (大文字): 对比度 ≥ 3.0:1
- AAA级 (正文): 对比度 ≥ 7.0:1
使用方法:
python tools/validate_colors.py
或集成到CI/CD:
pytest tests/test_accessibility.py::test_color_contrast
"""
import math
import sys
from pathlib import Path
from typing import Dict, Tuple
# Make the project root importable so app.design_tokens resolves.
sys.path.insert(0, str(Path(__file__).parent.parent))
from app.design_tokens import DesignTokens
def hex_to_rgb(hex_color: str) -> Tuple[int, int, int]:
    """Convert "#RRGGBB" (leading "#" optional) to an (r, g, b) int tuple."""
    digits = hex_color.lstrip("#")
    red = int(digits[0:2], 16)
    green = int(digits[2:4], 16)
    blue = int(digits[4:6], 16)
    return (red, green, blue)
def relative_luminance(rgb: Tuple[int, int, int]) -> float:
    """
    Relative luminance of an 8-bit RGB color (WCAG 2.1 formula).
    https://www.w3.org/TR/WCAG21/#dfn-relative-luminance
    """
    def linearize(channel: float) -> float:
        # Piecewise sRGB linearization; 0.03928 is the WCAG cutoff.
        if channel <= 0.03928:
            return channel / 12.92
        return math.pow((channel + 0.055) / 1.055, 2.4)

    r_lin, g_lin, b_lin = (linearize(value / 255.0) for value in rgb)
    return 0.2126 * r_lin + 0.7152 * g_lin + 0.0722 * b_lin
def contrast_ratio(color1: str, color2: str) -> float:
    """
    Contrast ratio between two hex colors (WCAG 2.1).
    https://www.w3.org/TR/WCAG21/#dfn-contrast-ratio
    Returns:
        A ratio from 1.0 (identical) to 21.0 (black on white).
    """
    first = relative_luminance(hex_to_rgb(color1))
    second = relative_luminance(hex_to_rgb(color2))
    lighter, darker = (first, second) if first >= second else (second, first)
    # The 0.05 flare term keeps the ratio finite for pure black.
    return (lighter + 0.05) / (darker + 0.05)
def check_wcag_compliance(
    foreground: str, background: str, level: str = "AA", text_size: str = "normal"
) -> Dict[str, object]:
    """Check a color pair against the WCAG 2.1 contrast thresholds.

    Args:
        foreground: Text color as a hex string (e.g. "#333333").
        background: Background color as a hex string.
        level: Target WCAG level, "AA" or "AAA".
        text_size: "normal" (body text) or "large" (headline-size text).

    Returns:
        Dict with the rounded ratio, pass flag, level, required threshold,
        both input colors, and a human-readable recommendation.

    Raises:
        KeyError: If *level* or *text_size* is not a supported value.
    """
    # NOTE: return annotation fixed — `Dict[str, any]` used the builtin
    # function `any`, not a type.
    ratio = contrast_ratio(foreground, background)
    # WCAG 2.1 minimum contrast thresholds (SC 1.4.3 AA / 1.4.6 AAA).
    thresholds = {
        "AA": {"normal": 4.5, "large": 3.0},
        "AAA": {"normal": 7.0, "large": 4.5},
    }
    required_ratio = thresholds[level][text_size]
    passes = ratio >= required_ratio
    result = {
        "ratio": round(ratio, 2),
        "passes": passes,
        "level": level,
        "required": required_ratio,
        "foreground": foreground,
        "background": background,
    }
    # Attach a human-readable recommendation (severity of any shortfall).
    if not passes:
        if ratio < required_ratio * 0.8:
            result["recommendation"] = "对比度严重不足,需要更换颜色"
        else:
            result["recommendation"] = "对比度略低,建议微调颜色深浅"
    else:
        if ratio >= thresholds["AAA"][text_size]:
            result["recommendation"] = "优秀符合WCAG AAA级标准"
        else:
            result["recommendation"] = "符合WCAG AA级标准"
    return result
def validate_design_tokens():
    """Check every design-token color pairing against WCAG 2.1 and print a report.

    Returns:
        True when all combinations pass, False otherwise.
    """
    results = []
    print("=" * 80)
    print("MeetSpot Design Tokens - WCAG 2.1色彩对比度验证报告")
    print("=" * 80)
    print()
    # 1. Brand colors against the white page background (normal text, AA).
    print("📊 品牌色 vs 白色背景")
    print("-" * 80)
    white_bg = DesignTokens.BACKGROUND["primary"]
    for color_name, color_value in DesignTokens.BRAND.items():
        if color_name == "gradient":
            continue  # gradients have no single contrast value
        result = check_wcag_compliance(color_value, white_bg, "AA", "normal")
        results.append(result)
        status = "✅ PASS" if result["passes"] else "❌ FAIL"
        print(
            f"{status} | {color_name:20s} | {color_value:10s} | {result['ratio']:5.2f}:1 | {result['recommendation']}"
        )
    print()
    # 2. Text colors against the white page background (normal text, AA).
    print("📊 文字色 vs 白色背景")
    print("-" * 80)
    for color_name, color_value in DesignTokens.TEXT.items():
        if color_name == "inverse":
            continue  # inverse text targets dark backgrounds, not white
        result = check_wcag_compliance(color_value, white_bg, "AA", "normal")
        results.append(result)
        status = "✅ PASS" if result["passes"] else "❌ FAIL"
        print(
            f"{status} | {color_name:20s} | {color_value:10s} | {result['ratio']:5.2f}:1 | {result['recommendation']}"
        )
    print()
    # 3. Venue theme colors: primary on white, and dark-on-light card text.
    print("📊 场所主题色验证 (主色 vs 白色背景)")
    print("-" * 80)
    for venue_name, theme in DesignTokens.VENUE_THEMES.items():
        if venue_name == "default":
            continue
        # Primary color vs white background (large-text standard, 3.0:1).
        result = check_wcag_compliance(
            theme["theme_primary"], white_bg, "AA", "large"  # large-text threshold (3.0:1)
        )
        results.append(result)
        status = "✅ PASS" if result["passes"] else "❌ FAIL"
        print(
            f"{status} | {venue_name:12s} | {theme['theme_primary']:10s} | {result['ratio']:5.2f}:1 | {result['recommendation']}"
        )
        # Dark theme color on the light theme background (card body text).
        result_card = check_wcag_compliance(
            theme["theme_dark"], theme["theme_light"], "AA", "normal"
        )
        results.append(result_card)
        status_card = "✅ PASS" if result_card["passes"] else "❌ FAIL"
        print(
            f" └─ {status_card} | 卡片文字 | {theme['theme_dark']:10s} on {theme['theme_light']:10s} | {result_card['ratio']:5.2f}:1"
        )
    print()
    print("=" * 80)
    # Tally pass/fail counts for the summary footer.
    total = len(results)
    passed = sum(1 for r in results if r["passes"])
    failed = total - passed
    print(f"验证总数: {total}")
    print(f"✅ 通过: {passed} ({passed/total*100:.1f}%)")
    print(f"❌ 失败: {failed} ({failed/total*100:.1f}%)")
    print("=" * 80)
    # True only when every combination passed.
    return failed == 0
# Script entry point: exit 1 when any color combination fails WCAG.
if __name__ == "__main__":
    all_passed = validate_design_tokens()
    sys.exit(0 if all_passed else 1)