feat: sync lijiaoqiao implementation and staging validation artifacts

This commit is contained in:
Your Name
2026-03-31 13:40:00 +08:00
parent 0e5ecd930e
commit e9338dec28
686 changed files with 29213 additions and 168 deletions

View File

@@ -0,0 +1,95 @@
# 自动化Review系统使用指南
## 一、系统概述
本系统为立交桥项目提供自动化的周期性 review 功能:每 3 小时执行一次检查,每天生成一份完整报告。
## 二、系统架构
```
scripts/auto_review/
├── auto_review.sh # 主脚本
├── auto_review_config.sh # 配置文件
├── review.sh # 快速入口
├── crontab_config # Cron配置
└── task_queue.json # 任务队列
review/
├── daily_reports/ # 每日报告目录
├── knowledge_base/ # 经验知识库
└── task_queue.json # 任务队列
```
## 三、使用方法
### 3.1 手动执行
```bash
# 执行3小时review
./scripts/auto_review/review.sh hourly
# 执行每日全面review
./scripts/auto_review/review.sh daily
# 强制执行完整review
./scripts/auto_review/review.sh force
```
### 3.2 定时任务配置
添加到crontab
```bash
# 编辑crontab
crontab -e
# 添加以下行:
# 每3小时执行一次
0 */3 * * * /home/long/project/立交桥/scripts/auto_review/review.sh hourly >> /home/long/project/立交桥/logs/auto_review/cron.log 2>&1
# 每天凌晨3点执行全面review
0 3 * * * /home/long/project/立交桥/scripts/auto_review/review.sh daily >> /home/long/project/立交桥/logs/auto_review/cron_daily.log 2>&1
```
## 四、生成的报告
### 4.1 每日报告
- 位置:`review/daily_reports/daily_review_YYYY-MM-DD.md`
- 内容:变更文件、待办任务、新发现问题、专家状态
### 4.2 Claude Code任务
- 位置:`review/claude_tasks_YYYY-MM-DD.md`
- 触发条件:发现问题或文档变更
### 4.3 经验知识库
- 位置:`review/knowledge_base/rules_and_experience_YYYY-MM-DD.md`
- 更新频率:每天凌晨 3 点
## 五、配置说明
编辑 `auto_review_config.sh` 可修改:
- 项目根目录
- Review频率
- 关键文档列表
- 专家角色列表
## 六、任务分发
当 review 发现问题或文档变更时,系统会:
1. 生成Claude任务文件
2. 更新任务队列
3. 记录到日志
用户可以查看任务文件并交给Claude Code执行。
## 七、日志
- 日志目录:`logs/auto_review/`
- 日志文件:`review_YYYYMMDD.log`
---
**维护者**:自动化系统
**更新时间**2026-03-30

View File

@@ -0,0 +1,486 @@
#!/bin/bash
#===============================================================================
# Automated periodic review script.
# Runs a project-wide review every 3 hours, generates reports and
# dispatches follow-up tasks.
# Usage: ./auto_review.sh [hourly|daily|force]
#===============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): one dirname above scripts/auto_review/ is scripts/, not the
# repo root -- verify; in practice the config sourced below re-exports
# PROJECT_ROOT and all derived paths with absolute values, masking this.
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
REVIEW_DIR="$PROJECT_ROOT/review"
REPORT_DIR="$REVIEW_DIR/daily_reports"
KNOWLEDGE_DIR="$REVIEW_DIR/knowledge_base"
TASK_QUEUE="$REVIEW_DIR/task_queue.json"
LOG_DIR="$PROJECT_ROOT/logs/auto_review"
# Load shared configuration (overrides the path variables above).
source "$SCRIPT_DIR/auto_review_config.sh"
#-------------------------------------------------------------------------------
# 日志函数
#-------------------------------------------------------------------------------
log() {
  # Write "[timestamp] [LEVEL] message..." to the console and append the
  # same line to today's log file under $LOG_DIR.
  # $1: level label (INFO/WARN/...); remaining args: the message.
  # BUGFIX: the console copy now goes to stderr. Several callers capture
  # function output with $(...) (e.g. review_result=$(perform_review ...))
  # while the callee itself calls log; with log printing to stdout those
  # captures swallowed the log lines and corrupted the captured JSON/paths.
  local level=$1
  shift
  local msg="[$(date '+%Y-%m-%d %H:%M:%S')] [$level] $*"
  echo "$msg" >&2
  echo "$msg" >> "$LOG_DIR/review_$(date '+%Y%m%d').log"
}
#-------------------------------------------------------------------------------
# 初始化
#-------------------------------------------------------------------------------
init() {
# Create report/log directories and seed the task-queue JSON on first run.
mkdir -p "$LOG_DIR" "$REPORT_DIR" "$KNOWLEDGE_DIR"
# Seed an empty queue so later jq updates have a document to edit.
if [ ! -f "$TASK_QUEUE" ]; then
echo '{"tasks":[], "last_updated":"", "last_review_date":""}' > "$TASK_QUEUE"
fi
log "INFO" "Auto review system initialized"
}
#-------------------------------------------------------------------------------
# 获取文档变更状态
#-------------------------------------------------------------------------------
get_doc_changes() {
  # Print paths under docs/ and review/ changed by commits newer than $1
  # ($1 is any git-parseable date spec, e.g. "24h" or "3 hours ago").
  # BUGFIX: --since is a `git log` option; `git diff --since=...` is
  # rejected by git, so the old implementation always printed nothing.
  # `git log --name-only --pretty=format:` lists only the changed paths;
  # awk drops the blank separator lines and sort -u deduplicates.
  local since="${1:-24h}"
  git -C "$PROJECT_ROOT" log --since="$since" --name-only --pretty=format: \
    -- "docs/" "review/" 2>/dev/null | awk 'NF' | sort -u || echo ""
}
#-------------------------------------------------------------------------------
# 读取上一份报告
#-------------------------------------------------------------------------------
read_last_report() {
  # Print the path of the most recently modified daily report under
  # $REPORT_DIR, or an empty line when none exist yet.
  local newest
  newest="$(ls -t "$REPORT_DIR"/daily_review_*.md 2>/dev/null | head -1 || true)"
  printf '%s\n' "$newest"
}
#-------------------------------------------------------------------------------
# 提取未完成任务
#-------------------------------------------------------------------------------
extract_pending_tasks() {
# Scrape open P0/P1 task rows out of a markdown report table and print
# them as a JSON array (best effort; "[]" on any failure).
# $1: path to a previous daily report; may be empty or missing.
local report_file="$1"
if [ -z "$report_file" ] || [ ! -f "$report_file" ]; then
echo "[]"
return
fi
# Keep table rows mentioning P0/P1, drop completed/closed rows, convert
# the pipe-separated cells to commas, rebuild each row as a small JSON
# object, then collect into an array with jq -s.
# NOTE(review): cell text containing '"' or ',' will yield invalid JSON
# here -- the trailing `|| echo "[]"` hides that; confirm report format.
grep -E "^\s*\|.*P[01].*\|" "$report_file" 2>/dev/null | \
grep -v "完成\|已关闭\|CLOSED" | \
sed -E 's/\|/,/g' | \
awk -F',' '{print "{\"id\":\""$1"\",\"desc\":\""$3"\",\"owner\":\""$4"\"}"}' | \
jq -s '.' 2>/dev/null || echo "[]"
}
#-------------------------------------------------------------------------------
# 执行Review检查
#-------------------------------------------------------------------------------
perform_review() {
  # Run one review pass and emit a JSON summary on stdout.
  # $1: review type label ("hourly"|"daily"|"force"), echoed into the JSON.
  local review_type="$1"
  # Send log lines to stderr so stdout carries only the JSON document
  # (callers capture this function's output with $(...)).
  log "INFO" "Starting $review_type review..." >&2

  # Files changed recently under docs/ and review/.
  local changes=""
  local change_count=0
  changes=$(get_doc_changes "3 hours ago" 2>/dev/null || echo "")
  # BUGFIX: `grep -c . || echo "0"` emitted TWO lines when nothing matched
  # ("0" from grep -c, which prints its count even while exiting 1, plus
  # "0" from echo), corrupting the numeric field in the JSON below.
  if [ -n "$changes" ]; then
    change_count=$(printf '%s\n' "$changes" | grep -c . || true)
  fi

  # Untracked files under docs/. Same hazard under pipefail: grep -c
  # already prints 0 on no match, so only `|| true` is needed -- the old
  # `wc -l || echo "0"` produced "0\n0" when grep failed the pipeline.
  local new_docs=0
  new_docs=$(git -C "$PROJECT_ROOT" status --porcelain "docs/" 2>/dev/null | grep -c "^??" || true)
  new_docs=${new_docs:-0}

  # Unfinished tasks carried over from the most recent daily report.
  local last_report
  last_report=$(read_last_report)
  local pending_tasks="[]"
  local pending_count=0
  if [ -n "$last_report" ] && [ -f "$last_report" ]; then
    pending_tasks=$(extract_pending_tasks "$last_report" 2>/dev/null || echo "[]")
    pending_count=$(echo "$pending_tasks" | jq 'length' 2>/dev/null || echo "0")
  fi
  pending_count=${pending_count:-0}

  # Lightweight health check standing in for an expert review: the
  # critical documents must exist.
  local issues_found=0
  local critical_doc
  for critical_doc in "$REVIEW_DIR/comprehensive_expert_review_report_v2_2026-03-18.md" "$PROJECT_ROOT/docs/architecture_solution_v1_2026-03-18.md"; do
    if [ ! -f "$critical_doc" ]; then
      log "WARN" "Critical document missing: $critical_doc" >&2
      issues_found=$((issues_found + 1))
    fi
  done

  # Changed files rendered as a JSON array of strings.
  local change_files_json="[]"
  if [ -n "$changes" ]; then
    change_files_json=$(echo "$changes" | jq -R -s 'split("\n") | map(select(length > 0))' 2>/dev/null || echo "[]")
  fi

  # Anything to act on?
  local action_required="false"
  if [ "$issues_found" -gt 0 ] || [ "$change_count" -gt 0 ]; then
    action_required="true"
  fi

  # Machine-readable result consumed by update_task_queue and friends.
  cat << EOF
{
"review_type": "$review_type",
"timestamp": "$(date -Iseconds)",
"changes_count": $change_count,
"new_docs_count": $new_docs,
"pending_tasks_count": $pending_count,
"issues_found": $issues_found,
"change_files": $change_files_json,
"action_required": $action_required
}
EOF
}
#-------------------------------------------------------------------------------
# 生成每日报告
#-------------------------------------------------------------------------------
generate_daily_report() {
# Render the daily markdown report from a perform_review() JSON blob and
# print the report path.
# $1: review result JSON.
# NOTE(review): log() output and this function's trailing echo share
# stdout -- callers capturing with $(...) get both; verify log() sends
# its console copy to stderr.
local review_data="$1"
local date_str=$(date '+%Y-%m-%d')
log "INFO" "Generating daily review report for $date_str..."
local report_file="$REPORT_DIR/daily_review_${date_str}.md"
local last_report=$(read_last_report)
# Extract the individual metrics defensively (default 0 on parse failure).
local changes_count new_docs_count pending_tasks_count issues_found
changes_count=$(echo "$review_data" | jq -r '.changes_count // 0' 2>/dev/null || echo "0")
new_docs_count=$(echo "$review_data" | jq -r '.new_docs_count // 0' 2>/dev/null || echo "0")
pending_tasks_count=$(echo "$review_data" | jq -r '.pending_tasks_count // 0' 2>/dev/null || echo "0")
issues_found=$(echo "$review_data" | jq -r '.issues_found // 0' 2>/dev/null || echo "0")
# Changed files as a markdown bullet list.
local change_list
change_list=$(echo "$review_data" | jq -r '.change_files[] // empty' 2>/dev/null | sed 's/^/- /' || echo "无变更")
# Report header and summary table.
cat > "$report_file" << EOF
# 立交桥项目每日Review报告
> 生成时间:$(date '+%Y-%m-%d %H:%M:%S')
> 报告日期:$date_str
> Review类型每日全面检查
---
## 一、Review执行摘要
| 指标 | 数值 | 较昨日 |
|------|------|--------|
| 文档变更数 | $changes_count | - |
| 新增文档数 | $new_docs_count | - |
| 待完成任务 | $pending_tasks_count | - |
| 发现问题 | $issues_found | - |
---
## 二、变更文件清单
$change_list
---
## 三、待完成任务追踪
### 3.1 P0问题阻断上线
EOF
# Carry the P0 table forward from the previous report (pipefail makes the
# fallback echo fire when grep finds no section).
if [ -n "$last_report" ]; then
grep -A 50 "### 3.1 P0问题" "$last_report" 2>/dev/null | head -30 >> "$report_file" || echo "| - | - | - | - |" >> "$report_file"
else
echo "| 编号 | 问题描述 | Owner | 状态 |" >> "$report_file"
echo "|-----|----------|-------|------|" >> "$report_file"
echo "| - | 暂无 | - | - |" >> "$report_file"
fi
cat >> "$report_file" << EOF
### 3.2 P1问题高优先级
EOF
# Same carry-forward for the P1 table.
if [ -n "$last_report" ]; then
grep -A 30 "### 3.2 P1问题" "$last_report" 2>/dev/null | head -20 >> "$report_file" || echo "| - | - | - |" >> "$report_file"
else
echo "| 编号 | 问题描述 | Owner |" >> "$report_file"
echo "|-----|----------|-------|" >> "$report_file"
fi
# Action-item text depends on whether this run found issues.
local action_text="无"
local new_issue_text="| - | - | 无新问题 | - |"
if [ "$issues_found" -gt 0 ]; then
action_text="存在 $issues_found 个问题需处理"
new_issue_text="| NEW-001 | P1 | 新发现的问题(待详细记录) | $(date '+%Y-%m-%d %H:%M') |"
fi
cat >> "$report_file" << EOF
---
## 四、新发现问题
| 编号 | 等级 | 问题描述 | 发现时间 |
|------|------|----------|----------|
$new_issue_text
---
## 五、建议行动项
1. **立即处理**$action_text
2. **持续跟进**$pending_tasks_count 个待办任务
3. **文档更新**$new_docs_count 个新文档待审核
---
## 六、专家评审状态
| 轮次 | 主题 | 结论 | 日期 |
|------|------|------|------|
| Round-1 | 架构与替换路径 | CONDITIONAL GO | 2026-03-19 |
| Round-2 | 兼容与计费一致性 | CONDITIONAL GO | 2026-03-22 |
| Round-3 | 安全与合规攻防 | CONDITIONAL GO | 2026-03-25 |
| Round-4 | 可靠性与回滚演练 | CONDITIONAL GO | 2026-03-29 |
---
**报告状态**:自动生成
**下次更新**$(date -d '+3 hours' '+%Y-%m-%d %H:%M')
EOF
log "INFO" "Daily report generated: $report_file"
echo "$report_file"
}
#-------------------------------------------------------------------------------
# 更新任务队列
#-------------------------------------------------------------------------------
update_task_queue() {
# Persist review stats into the task-queue JSON and drop a raw task file
# when the review found issues.
# $1: review result JSON from perform_review().
local review_data="$1"
local date_str=$(date '+%Y-%m-%d')
# Extract fields defensively (defaults on parse failure).
local changes_count issues_found action_required
changes_count=$(echo "$review_data" | jq -r '.changes_count // 0' 2>/dev/null || echo "0")
issues_found=$(echo "$review_data" | jq -r '.issues_found // 0' 2>/dev/null || echo "0")
action_required=$(echo "$review_data" | jq -r '.action_required // "false"' 2>/dev/null || echo "false")
# Read the current queue, falling back to an empty skeleton.
local current_queue=$(cat "$TASK_QUEUE" 2>/dev/null || echo '{"tasks":[]}')
# Update bookkeeping fields. jq treats missing review_stats counters as
# null and null + 1 == 1, so first runs work too.
# NOTE(review): $changes is bound via --argjson but never used in the jq
# program, and $action_required is extracted but unused -- confirm
# whether those should be recorded in the queue.
local updated
updated=$(echo "$current_queue" | jq --arg timestamp "$(date -Iseconds)" \
--arg date "$date_str" \
--argjson changes "$changes_count" \
--argjson issues "$issues_found" \
'.last_updated = $timestamp | .last_review_date = $date | .review_stats.total_reviews += 1 | .review_stats.issues_found += $issues')
echo "$updated" > "$TASK_QUEUE"
# Snapshot the raw review JSON for follow-up when issues were found.
if [ "$issues_found" -gt 0 ]; then
local task_file="$REVIEW_DIR/pending_tasks_$(date '+%Y%m%d_%H%M%S').json"
echo "$review_data" > "$task_file"
log "WARN" "Issues found! Task file created: $task_file"
fi
}
#-------------------------------------------------------------------------------
# 生成Claude Code任务
#-------------------------------------------------------------------------------
generate_claude_tasks() {
# Write a markdown task file for Claude Code when the review flagged
# anything actionable; prints the task file path (or an empty line).
# $1: review result JSON from perform_review().
local review_data="$1"
local date_str=$(date '+%Y-%m-%d')
# Extract fields defensively (defaults on parse failure).
local needs_action issues_found changes_count pending_tasks_count
needs_action=$(echo "$review_data" | jq -r '.action_required // "false"' 2>/dev/null || echo "false")
issues_found=$(echo "$review_data" | jq -r '.issues_found // 0' 2>/dev/null || echo "0")
changes_count=$(echo "$review_data" | jq -r '.changes_count // 0' 2>/dev/null || echo "0")
pending_tasks_count=$(echo "$review_data" | jq -r '.pending_tasks_count // 0' 2>/dev/null || echo "0")
# Changed files rendered as a "review this document" checklist.
# NOTE(review): sed prefixes EVERY line with "1. " -- markdown renumbers
# ordered lists on render, but confirm that is intended.
local change_list
change_list=$(echo "$review_data" | jq -r '.change_files[] // empty' 2>/dev/null | sed 's/^/1. 审核文档:/' || echo "1. 检查并处理review发现的问题")
if [ "$needs_action" = "true" ]; then
local task_file="$REVIEW_DIR/claude_tasks_${date_str}.md"
cat > "$task_file" << EOF
# Claude Code 执行任务
> 生成时间:$(date '+%Y-%m-%d %H:%M:%S')
> 触发条件Review发现需要处理的问题
## 执行要求
请Claude Code CLI按照以下规范执行
1. **遵循superpowers插件规范**
2. **严格按照项目规划设计执行**
3. **优先处理P0问题**
## 待处理问题清单
- 问题数量:$issues_found
- 文档变更:$changes_count 个文件
- 待办任务:$pending_tasks_count 个
## 具体任务
$change_list
---
**状态**:等待执行
**优先级**:高
EOF
log "INFO" "Claude tasks generated: $task_file"
echo "$task_file"
else
echo ""
fi
}
#-------------------------------------------------------------------------------
# 更新经验知识库每日3点执行
#-------------------------------------------------------------------------------
update_knowledge_base() {
  # Rebuild the daily knowledge-base document (rules, review verdicts,
  # action priorities). Triggered from the daily run; it no-ops unless the
  # current hour is 03 so other invocations skip it.
  local current_hour
  current_hour=$(date '+%H')
  if [ "$current_hour" != "03" ]; then
    log "INFO" "Skipping knowledge base update (not 3am, current: ${current_hour}00)"
    return 0
  fi
  log "INFO" "Updating knowledge base..."
  local date_str
  date_str=$(date '+%Y-%m-%d')
  # BUGFIX: previously wrote to "$KNOWLEDGE_BASE/..." but that variable is
  # never defined anywhere (the directory variable is KNOWLEDGE_DIR);
  # under `set -u` this aborted every 3am daily run before writing.
  local kb_file="$KNOWLEDGE_DIR/rules_and_experience_${date_str}.md"
  # Count open P0/P1 rows in the latest report.
  # BUGFIX: the old `grep | wc -l` pipeline failed under pipefail when
  # grep matched nothing, aborting the function via set -e; grep -c still
  # prints its count (0) before exiting non-zero, so || true suffices.
  local issues="0"
  local last_report
  last_report=$(read_last_report)
  if [ -n "$last_report" ]; then
    issues=$(grep -c -E "^\|.*P[01]" "$last_report" 2>/dev/null || true)
  fi
  # NOTE(review): GNU date keeps only the last -d option, so the footer
  # below prints 03:00 of *today*, not tomorrow -- confirm intent.
  cat > "$kb_file" << EOF
# 立交桥项目经验与规则
> 更新时间:$(date '+%Y-%m-%d %H:%M:%S')
> 版本:$(date '+%Y%m%d')
## 一、项目关键规范
### 1.1 架构原则
- Provider Adapter抽象层设计
- 三层降级策略(同平台换号/同区域换平台/全局降级)
- 分阶段验证S2-A/B/C1/C2
### 1.2 安全红线
- 内网隔离 + mTLS双向认证
- 契约漂移CI阻断
- 密钥90天轮换
### 1.3 质量门禁
- 接管率 >= 99.9% 覆盖率
- 自动回滚 <= 10分钟
- 服务恢复 <= 30分钟
- 用户通知 <= 15分钟
## 二、待解决P0问题
- 数量:$issues 个(来自最新报告)
## 三、专家评审结论
| 维度 | 结论 | 评分 |
|------|------|------|
| 架构 | CONDITIONAL GO | 3.5/5 |
| API设计 | CONDITIONAL GO | 4.0/5 |
| 安全防护 | CONDITIONAL GO | 3.0/5 |
| 业务合规 | CONDITIONAL GO | 3.5/5 |
| 计费精度 | CONDITIONAL GO | 4.0/5 |
| 可靠性 | CONDITIONAL GO | 3.0/5 |
## 四、行动优先级
1. **P0**:安全验证、契约测试、降级演练
2. **P1**用户体验、SLA文档、计费准确性
3. **P2**SDK开发、法务确认、DDoS防护
---
**状态**:每日自动更新
**下次更新**$(date -d '+1 day' -d '3:00' '+%Y-%m-%d %H:%M')
EOF
  log "INFO" "Knowledge base updated: $kb_file"
}
#-------------------------------------------------------------------------------
# 主函数
#-------------------------------------------------------------------------------
main() {
# Entry point: dispatch on review mode (default "hourly").
local mode="${1:-hourly}"
init
case "$mode" in
hourly)
log "INFO" "Running hourly review..."
# NOTE(review): these $(...) captures also absorb anything the callee
# writes to stdout -- verify log() sends its console copy to stderr,
# otherwise review_result/report contain log lines too.
local review_result=$(perform_review "hourly")
update_task_queue "$review_result"
generate_claude_tasks "$review_result"
;;
daily)
log "INFO" "Running daily full review..."
local review_result=$(perform_review "daily")
local report=$(generate_daily_report "$review_result")
update_task_queue "$review_result"
generate_claude_tasks "$review_result"
update_knowledge_base
log "INFO" "Daily review completed. Report: $report"
;;
force)
log "WARN" "Running forced full review..."
local review_result=$(perform_review "force")
local report=$(generate_daily_report "$review_result")
update_task_queue "$review_result"
generate_claude_tasks "$review_result"
log "INFO" "Forced review completed. Report: $report"
;;
*)
echo "Usage: $0 [hourly|daily|force]"
exit 1
;;
esac
}
main "$@"

View File

@@ -0,0 +1,42 @@
#===============================================================================
# Automated review configuration (sourced by auto_review.sh / review.sh).
#===============================================================================
# Project root directory (absolute; overrides the caller's derived value).
export PROJECT_ROOT="/home/long/project/立交桥"
# Review directories.
export REVIEW_DIR="$PROJECT_ROOT/review"
export REPORT_DIR="$REVIEW_DIR/daily_reports"
export KNOWLEDGE_DIR="$REVIEW_DIR/knowledge_base"
# Task queue file.
export TASK_QUEUE="$REVIEW_DIR/task_queue.json"
# Log directory.
export LOG_DIR="$PROJECT_ROOT/logs/auto_review"
# Review frequency in hours.
export REVIEW_INTERVAL=3
# Daily full-review hour (24h clock).
export DAILY_UPDATE_HOUR=3
# Critical documents every review verifies exist.
# NOTE(review): bash cannot export arrays -- CRITICAL_DOCS is visible to
# sourcing scripts but does NOT propagate to child processes; confirm no
# consumer relies on inheriting it through the environment.
export CRITICAL_DOCS=(
"docs/architecture_solution_v1_2026-03-18.md"
"docs/api_solution_v1_2026-03-18.md"
"docs/security_solution_v1_2026-03-18.md"
"docs/business_solution_v1_2026-03-18.md"
"docs/llm_gateway_prd_v1_2026-03-25.md"
"docs/supply_technical_design_enhanced_v1_2026-03-25.md"
)
# Expert reviewer roles (id:title pairs, comma separated).
# NOTE(review): readonly makes re-sourcing this file in the same shell an
# error (readonly reassignment) -- confirm each script sources it once.
readonly EXPERT_ROLES="E01:架构负责人,E02:平台工程负责人,E03:SRE负责人,E04:安全负责人,E05:计费/数据负责人,E06:合规/法务接口人,E07:产品负责人,E13:用户代表,E14:测试负责人,E15:网关专家"
# Claude Code CLI command used for task dispatch.
export CLAUDE_CLI_CMD="claude"
# Whether Claude Code task dispatch is enabled.
export ENABLE_TASK_DISPATCH=true
View File

@@ -0,0 +1,10 @@
#===============================================================================
# 自动化Review Cron配置
#===============================================================================
# 使用方法crontab -e 并添加以下行
# 每3小时执行一次review0点、3点、6点、9点、12点、15点、18点、21点
0 */3 * * * /home/long/project/立交桥/scripts/auto_review/review.sh hourly >> /home/long/project/立交桥/logs/auto_review/cron.log 2>&1
# 每天凌晨3点执行全面review并更新知识库
0 3 * * * /home/long/project/立交桥/scripts/auto_review/review.sh daily >> /home/long/project/立交桥/logs/auto_review/cron_daily.log 2>&1

12
scripts/auto_review/review.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
#===============================================================================
# Quick entry point: loads the shared config, then delegates to the main
# review script, forwarding all arguments ([hourly|daily|force]).
#===============================================================================
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Load configuration (exports PROJECT_ROOT and friends for the child).
source "$SCRIPT_DIR/auto_review_config.sh"
# Run the full review script with the caller's arguments.
"$SCRIPT_DIR/auto_review.sh" "$@"

View File

@@ -0,0 +1,10 @@
{
"tasks": [],
"last_updated": "",
"last_review_date": "",
"review_stats": {
"total_reviews": 0,
"issues_found": 0,
"tasks_dispatched": 0
}
}

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Dependency-audit gate: verifies the four expected artifacts for a given
# date exist, look sane, and are explicitly marked PASS, then writes a
# summary result report. Exits non-zero on the first failed check.
# $1: date tag (YYYY-MM-DD), defaults to today.
set -euo pipefail
PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
DATE_TAG="${1:-$(date +%F)}"
REPORT_DIR="$PROJECT_ROOT/reports/dependency"
SBOM_FILE="$REPORT_DIR/sbom_${DATE_TAG}.spdx.json"
LOCK_DIFF_FILE="$REPORT_DIR/lockfile_diff_${DATE_TAG}.md"
COMPAT_FILE="$REPORT_DIR/compat_matrix_${DATE_TAG}.md"
RISK_FILE="$REPORT_DIR/risk_register_${DATE_TAG}.md"
OUT_FILE="$REPORT_DIR/dependency_audit_result_${DATE_TAG}.md"
# Pass 1: every artifact must exist and be non-empty (-s).
missing=0
for f in "$SBOM_FILE" "$LOCK_DIFF_FILE" "$COMPAT_FILE" "$RISK_FILE"; do
if [[ ! -s "$f" ]]; then
echo "[FAIL] missing or empty: $f"
missing=1
else
echo "[OK] found: $f"
fi
done
if [[ $missing -ne 0 ]]; then
exit 1
fi
# Pass 2: shallow SPDX shape check (key presence only, not full schema).
if ! grep -q '"spdxVersion"' "$SBOM_FILE"; then
echo "[FAIL] sbom missing spdxVersion"
exit 1
fi
if ! grep -q '"packages"' "$SBOM_FILE"; then
echo "[FAIL] sbom missing packages"
exit 1
fi
# Pass 3: each markdown artifact must carry an explicit PASS marker.
for f in "$LOCK_DIFF_FILE" "$COMPAT_FILE" "$RISK_FILE"; do
if ! grep -q '^- Audit-Status: PASS' "$f"; then
echo "[FAIL] audit status not PASS in: $f"
exit 1
fi
done
# Emit the machine-readable result report. ${VAR##$PROJECT_ROOT/} strips
# the absolute prefix to get repo-relative paths (safe: no glob chars).
cat > "$OUT_FILE" <<REPORT
# Dependency Audit Check Result (${DATE_TAG})
- Result: PASS
- M-017 (\`dependency_compat_audit_pass_pct\`): 100%
- Checked files:
1. ${SBOM_FILE##$PROJECT_ROOT/}
2. ${LOCK_DIFF_FILE##$PROJECT_ROOT/}
3. ${COMPAT_FILE##$PROJECT_ROOT/}
4. ${RISK_FILE##$PROJECT_ROOT/}
REPORT
echo "[PASS] dependency audit check complete"
echo "result report: $OUT_FILE"

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env bash
# Cross-checks the signed final-decision document against the newest
# machine-generated recheck reports and writes a consistency report.
# Exit status: 0 on PASS/WARN, 1 on FAIL (unparseable sources).
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
TS="$(date +%F_%H%M%S)"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
REPORT_FILE="${OUT_DIR}/final_decision_consistency_${TS}.md"
LOG_FILE="${OUT_DIR}/final_decision_consistency_${TS}.log"
latest_file_or_empty() {
  # Expand an (intentionally unquoted) glob pattern and print the most
  # recently modified match, or an empty line when nothing matches.
  local glob="$1"
  local newest=""
  local candidate
  # shellcheck disable=SC2086 -- $glob must word-split/expand as a pattern
  for candidate in ${glob}; do
    [[ -e "${candidate}" ]] || continue
    if [[ -z "${newest}" || "${candidate}" -nt "${newest}" ]]; then
      newest="${candidate}"
    fi
  done
  echo "${newest}"
}
parse_checkbox_decision() {
  # Read the checked box out of a signed decision markdown file and print
  # the normalized verdict. Priority when several boxes are checked:
  # GO > CONDITIONAL_GO > NO_GO. Missing/unparseable files yield UNKNOWN.
  local file="$1"
  if [[ ! -f "${file}" ]]; then
    echo "UNKNOWN"
    return
  fi
  if grep -Eq '^- \[x\] (GO|通过)' "${file}"; then
    echo "GO"
  elif grep -Eq '^- \[x\] (CONDITIONAL GO|有条件通过)' "${file}"; then
    echo "CONDITIONAL_GO"
  elif grep -Eq '^- \[x\] (NO-GO|不通过)' "${file}"; then
    echo "NO_GO"
  else
    echo "UNKNOWN"
  fi
}
parse_machine_decision() {
  # Extract the verdict from a machine-generated report's first
  # "机判结论"/"决策" bullet line. Substring priority (NO_GO before
  # CONDITIONAL_GO before GO) matches the original check order, since GO
  # is a substring of the longer tokens. Prints UNKNOWN on any miss.
  local file="$1"
  if [[ ! -f "${file}" ]]; then
    echo "UNKNOWN"
    return
  fi
  local row
  row="$(grep -E '^\- (机判结论|决策)\*\*' "${file}" | head -n 1 || true)"
  case "${row}" in
    "") echo "UNKNOWN" ;;
    *NO_GO*) echo "NO_GO" ;;
    *CONDITIONAL_GO*) echo "CONDITIONAL_GO" ;;
    *GO*) echo "GO" ;;
    *) echo "UNKNOWN" ;;
  esac
}
# Inputs: the signed decision document plus the newest machine reports.
FINAL_DECISION_FILE="${ROOT_DIR}/review/final_decision_2026-03-31.md"
TOK007_FILE="$(latest_file_or_empty "${ROOT_DIR}/review/outputs/tok007_release_recheck_*.md")"
SP_FILE="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/superpowers_stage_validation_*.md")"
FINAL_DECISION="$(parse_checkbox_decision "${FINAL_DECISION_FILE}")"
TOK007_DECISION="$(parse_machine_decision "${TOK007_FILE}")"
SP_DECISION="$(parse_machine_decision "${SP_FILE}")"
# Classify: FAIL when any source is unparseable; WARN when the signed
# decision lags the newest tok007 machine recheck; PASS otherwise.
# NOTE(review): SP_DECISION only gates parseability and is never compared
# against FINAL_DECISION -- confirm that asymmetry is intended.
CONSISTENCY_STATUS="PASS"
CONSISTENCY_NOTE="final decision is aligned with latest machine recheck"
if [[ "${FINAL_DECISION}" == "UNKNOWN" || "${TOK007_DECISION}" == "UNKNOWN" || "${SP_DECISION}" == "UNKNOWN" ]]; then
CONSISTENCY_STATUS="FAIL"
CONSISTENCY_NOTE="cannot parse one or more decision sources"
elif [[ "${FINAL_DECISION}" != "${TOK007_DECISION}" ]]; then
CONSISTENCY_STATUS="WARN"
CONSISTENCY_NOTE="final signed decision lags latest machine recheck; requires manual review update"
fi
# Human-facing markdown report.
cat > "${REPORT_FILE}" <<EOF
# Final Decision Consistency Check
- 时间戳:${TS}
- 执行脚本:\`scripts/ci/final_decision_consistency_check.sh\`
## 1. 输入源
| 来源 | 路径 | 解析结论 |
|---|---|---|
| final_decision | ${FINAL_DECISION_FILE} | ${FINAL_DECISION} |
| tok007_recheck | ${TOK007_FILE:-N/A} | ${TOK007_DECISION} |
| superpowers_stage_validation | ${SP_FILE:-N/A} | ${SP_DECISION} |
## 2. 一致性结果
- 状态:**${CONSISTENCY_STATUS}**
- 说明:${CONSISTENCY_NOTE}
## 3. 建议动作
1. 若状态为 WARN人工确认是否需要更新 \`review/final_decision_2026-03-31.md\` 的勾选与签署记录。
2. 若状态为 FAIL先修复报告来源或解析格式再重新执行本检查。
3. staging 真值就绪后,按顺序重跑:
1. \`scripts/ci/superpowers_stage_validate.sh\`
2. \`scripts/ci/tok007_release_recheck.sh\`
3. \`scripts/ci/final_decision_consistency_check.sh\`
EOF
# Console summary, mirrored into the log file.
{
echo "[INFO] FINAL_DECISION=${FINAL_DECISION}"
echo "[INFO] TOK007_DECISION=${TOK007_DECISION}"
echo "[INFO] SP_DECISION=${SP_DECISION}"
echo "[RESULT] ${CONSISTENCY_STATUS}"
echo "[INFO] REPORT=${REPORT_FILE}"
} | tee "${LOG_FILE}"
# Only FAIL is fatal; WARN still exits 0 (manual follow-up expected).
if [[ "${CONSISTENCY_STATUS}" == "FAIL" ]]; then
exit 1
fi

View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# Generates a local "staging-real" env file with freshly issued bearer
# tokens, starting a temporary token runtime when none is reachable.
# $1: env file path (relative to repo root unless absolute).
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
ENV_REL="${1:-scripts/supply-gate/.env.staging-real}"
# Accept absolute paths as-is; anchor relative ones at the repo root.
if [[ "${ENV_REL}" == /* ]]; then
ENV_PATH="${ENV_REL}"
else
ENV_PATH="${ROOT_DIR}/${ENV_REL}"
fi
# Tunables (override via environment).
API_BASE_URL_VALUE="${API_BASE_URL_VALUE:-http://127.0.0.1:18080}"
TOKEN_RUNTIME_URL="${TOKEN_RUNTIME_URL:-http://127.0.0.1:18081}"
TOKEN_TTL_SECONDS="${TOKEN_TTL_SECONDS:-7200}"
TOKEN_SUBJECT_PREFIX="${TOKEN_SUBJECT_PREFIX:-local-staging-real}"
START_RUNTIME_IF_NEEDED="${START_RUNTIME_IF_NEEDED:-1}"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
TS="$(date +%F_%H%M%S)"
RUNTIME_LOG="${OUT_DIR}/local_token_runtime_generate_env_${TS}.log"
REPORT_FILE="${OUT_DIR}/local_staging_env_generation_${TS}.md"
# State consumed by cleanup(): set when this script spawns the runtime.
RUNTIME_STARTED_BY_SCRIPT=0
RUNTIME_PID=""
require_bin() {
  # Abort the whole script when a required external command is missing.
  local tool="$1"
  if command -v "${tool}" >/dev/null 2>&1; then
    return 0
  fi
  echo "[FAIL] missing required binary: ${tool}"
  exit 1
}
# Preflight: every external tool used below must be present.
require_bin curl
require_bin jq
require_bin date
require_bin ss
require_bin awk
require_bin sed
require_bin sha256sum
is_http_ready() {
# True when the runtime answers its health endpoint with "UP"
# (Spring-actuator style path) within a 1-second budget.
local url="$1"
curl -sS -m 1 "${url}/actuator/health" 2>/dev/null | grep -q '"UP"'
}
is_port_in_use() {
# True when some local TCP listener is bound to port $1 (any address;
# the [:.] alternation covers both IPv4 and IPv6 ss output formats).
local port="$1"
ss -ltn | awk '{print $4}' | grep -Eq "[:.]${port}$"
}
pick_free_port() {
  # Scan upward from a base port for one with no active listener.
  # $1: starting port (default 18091); $2: how many ports to try (default 80).
  # Prints the first free port and returns 0, or returns 1 when exhausted.
  local candidate="${1:-18091}"
  local remaining="${2:-80}"
  while [[ "${remaining}" -gt 0 ]]; do
    if ! is_port_in_use "${candidate}"; then
      echo "${candidate}"
      return 0
    fi
    candidate=$((candidate + 1))
    remaining=$((remaining - 1))
  done
  return 1
}
cleanup() {
# Kill the token runtime only if this script started it (never a
# pre-existing one); best effort, errors ignored.
if [[ "${RUNTIME_STARTED_BY_SCRIPT}" == "1" && -n "${RUNTIME_PID}" ]]; then
kill "${RUNTIME_PID}" >/dev/null 2>&1 || true
fi
}
# Run cleanup on every exit path, including failures under set -e.
trap cleanup EXIT
ensure_runtime() {
# Ensure a token runtime is reachable at TOKEN_RUNTIME_URL. If not, and
# auto-start is allowed, run the Go runtime on a free port (rewriting
# TOKEN_RUNTIME_URL) and wait up to ~10s for it to report healthy.
if is_http_ready "${TOKEN_RUNTIME_URL}"; then
return 0
fi
if [[ "${START_RUNTIME_IF_NEEDED}" != "1" ]]; then
echo "[FAIL] token runtime not ready: ${TOKEN_RUNTIME_URL}"
echo "[HINT] set START_RUNTIME_IF_NEEDED=1 or start token runtime manually"
exit 1
fi
# Prefer the repo-pinned Go toolchain, fall back to go on PATH.
local go_bin="${ROOT_DIR}/.tools/go-current/bin/go"
if [[ ! -x "${go_bin}" ]]; then
go_bin="$(command -v go || true)"
fi
if [[ -z "${go_bin}" ]]; then
echo "[FAIL] go binary not found; cannot start local token runtime"
exit 1
fi
local port
if ! port="$(pick_free_port 18091 80)"; then
echo "[FAIL] no free port found for temporary token runtime"
exit 1
fi
TOKEN_RUNTIME_URL="http://127.0.0.1:${port}"
# Launch in a subshell so cwd/env changes stay contained; all runtime
# output goes to RUNTIME_LOG for post-mortem.
(
cd "${ROOT_DIR}/platform-token-runtime"
export PATH="$(dirname "${go_bin}"):${PATH}"
export GOCACHE="${ROOT_DIR}/.tools/go-cache"
export GOPATH="${ROOT_DIR}/.tools/go"
TOKEN_RUNTIME_ADDR=":${port}" "${go_bin}" run ./cmd/platform-token-runtime
) >"${RUNTIME_LOG}" 2>&1 &
RUNTIME_PID=$!
RUNTIME_STARTED_BY_SCRIPT=1
# Poll for readiness: 50 x 0.2s ~= 10s budget.
for _ in {1..50}; do
if is_http_ready "${TOKEN_RUNTIME_URL}"; then
return 0
fi
sleep 0.2
done
echo "[FAIL] temporary token runtime failed to become ready: ${TOKEN_RUNTIME_URL}"
echo "[INFO] log: ${RUNTIME_LOG}"
exit 1
}
issue_token() {
# Request one bearer token from the runtime's issue endpoint, print it.
# $1: role name (e.g. owner|viewer|admin); $2: JSON array of scopes.
# Exits the script on HTTP status != 201 or an empty token in the body.
local role="$1"
local scope_json="$2"
# Per-call request/idempotency identifiers (role- and timestamp-scoped).
local req_id="req-gen-${role}-${TS}"
local idem="idem-gen-${role}-${TS}"
local subject="${TOKEN_SUBJECT_PREFIX}-${role}-${TS}"
# Build the request body with jq so every value is properly quoted.
local payload
payload="$(jq -n \
--arg s "${subject}" \
--arg r "${role}" \
--argjson ttl "${TOKEN_TTL_SECONDS}" \
--argjson sc "${scope_json}" \
'{subject_id:$s,role:$r,ttl_seconds:$ttl,scope:$sc}')"
# Capture response body and HTTP status separately.
local body_file
body_file="$(mktemp)"
local status
status="$(curl -sS -m 8 -o "${body_file}" -w "%{http_code}" \
-X POST "${TOKEN_RUNTIME_URL}/api/v1/platform/tokens/issue" \
-H "Content-Type: application/json" \
-H "X-Request-Id: ${req_id}" \
-H "Idempotency-Key: ${idem}" \
-d "${payload}")"
if [[ "${status}" != "201" ]]; then
echo "[FAIL] issue ${role} token failed, status=${status}"
cat "${body_file}" || true
rm -f "${body_file}"
exit 1
fi
local token
token="$(jq -r '.data.access_token // empty' "${body_file}")"
rm -f "${body_file}"
if [[ -z "${token}" ]]; then
echo "[FAIL] issue ${role} token returned empty access_token"
exit 1
fi
echo "${token}"
}
# Issue one token per role, then render the env file plus a summary report.
ensure_runtime
OWNER_TOKEN="$(issue_token "owner" "[\"supply:*\"]")"
VIEWER_TOKEN="$(issue_token "viewer" "[\"supply:read\"]")"
ADMIN_TOKEN="$(issue_token "admin" "[\"supply:*\"]")"
# Nominal expiry recorded for humans; actual expiry is runtime-enforced.
EXP_UTC="$(date -u -d "+${TOKEN_TTL_SECONDS} seconds" +%Y-%m-%dT%H:%M:%SZ)"
mkdir -p "$(dirname "${ENV_PATH}")"
# NOTE(review): the TEST_* values below are placeholder fixtures; confirm
# they are overridden before running against a real staging environment.
cat > "${ENV_PATH}" <<EOF
# local staging-real(simulated) generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)
# token nominal expiry: ${EXP_UTC}
# token runtime source: ${TOKEN_RUNTIME_URL}
API_BASE_URL="${API_BASE_URL_VALUE}"
OWNER_BEARER_TOKEN="${OWNER_TOKEN}"
VIEWER_BEARER_TOKEN="${VIEWER_TOKEN}"
ADMIN_BEARER_TOKEN="${ADMIN_TOKEN}"
TEST_PROVIDER="openai"
TEST_MODEL="gpt-4o"
TEST_ACCOUNT_ALIAS="sup_acc_cmd"
TEST_CREDENTIAL_INPUT="sk-test-replace-me"
TEST_PAYMENT_METHOD="alipay"
TEST_PAYMENT_ACCOUNT="tester@example.com"
TEST_SMS_CODE="123456"
SUPPLIER_DIRECT_TEST_URL=""
EOF
# Tokens are secrets: restrict the env file to the owner.
chmod 600 "${ENV_PATH}"
# Short sha256 prefixes let the report reference tokens without leaking them.
owner_hash="$(printf "%s" "${OWNER_TOKEN}" | sha256sum | awk '{print substr($1,1,12)}')"
viewer_hash="$(printf "%s" "${VIEWER_TOKEN}" | sha256sum | awk '{print substr($1,1,12)}')"
admin_hash="$(printf "%s" "${ADMIN_TOKEN}" | sha256sum | awk '{print substr($1,1,12)}')"
# Markdown summary (token metadata only, no plaintext).
{
echo "# Local Staging Env Generation"
echo
echo "- 时间戳:${TS}"
echo "- 输出文件:\`${ENV_PATH}\`"
echo "- API_BASE_URL\`${API_BASE_URL_VALUE}\`"
echo "- token nominal expiry(UTC)\`${EXP_UTC}\`"
echo "- token runtime\`${TOKEN_RUNTIME_URL}\`"
echo "- runtime auto-start\`${RUNTIME_STARTED_BY_SCRIPT}\`"
echo
echo "## Token 摘要(不含明文)"
echo
echo "| role | length | sha256_12 |"
echo "|---|---:|---|"
echo "| owner | ${#OWNER_TOKEN} | ${owner_hash} |"
echo "| viewer | ${#VIEWER_TOKEN} | ${viewer_hash} |"
echo "| admin | ${#ADMIN_TOKEN} | ${admin_hash} |"
echo
echo "## 下一步"
echo
echo "1. 使用该 env 执行:\`ALLOW_LOCAL_MOCK_STAGING=1 bash scripts/ci/staging_release_pipeline.sh ${ENV_PATH}\`"
echo "2. 若切换真实 staging更新 \`API_BASE_URL\` 后复跑。"
} > "${REPORT_FILE}"
echo "[PASS] env generated: ${ENV_PATH}"
echo "[INFO] report: ${REPORT_FILE}"
if [[ "${RUNTIME_STARTED_BY_SCRIPT}" == "1" ]]; then
echo "[INFO] runtime log: ${RUNTIME_LOG}"
fi

View File

@@ -0,0 +1,118 @@
#!/usr/bin/env bash
# Computes daily gate metrics M-017/M-018/M-019 from the newest gate
# reports, appends a row to the cumulative CSV, and writes a markdown
# snapshot plus a drift report for the given date.
# $1: date tag (YYYY-MM-DD), defaults to today.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
DATE_TAG="${1:-$(date +%F)}"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
SNAPSHOT_MD="${OUT_DIR}/metrics_daily_snapshot_${DATE_TAG}.md"
SNAPSHOT_CSV="${OUT_DIR}/metrics_daily_snapshots.csv"
DRIFT_MD="${ROOT_DIR}/reports/design_drift_daily_${DATE_TAG}.md"
latest_file_or_empty() {
  # Expand an (intentionally unquoted) glob pattern and print the most
  # recently modified match, or an empty line when nothing matches.
  local glob="$1"
  local newest=""
  local candidate
  # shellcheck disable=SC2086 -- $glob must word-split/expand as a pattern
  for candidate in ${glob}; do
    [[ -e "${candidate}" ]] || continue
    if [[ -z "${newest}" || "${candidate}" -nt "${newest}" ]]; then
      newest="${candidate}"
    fi
  done
  echo "${newest}"
}
# Locate the newest source artifact for each metric.
DEP_FILE="$(latest_file_or_empty "${ROOT_DIR}/reports/dependency/dependency_audit_result_*.md")"
SP_FILE="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/superpowers_stage_validation_*.md")"
TRACE_FILE="$(latest_file_or_empty "${ROOT_DIR}/reports/supply_traceability_matrix_*.csv")"
# Defaults: 0% with a "source missing" note; overwritten when found.
M017="0.00"
M018="0.00"
M019="0.00"
M017_NOTE="dependency audit report missing"
M018_NOTE="superpowers stage validation report missing"
M019_NOTE="traceability matrix missing"
# M-017 is binary: 100% iff the newest audit result file says PASS.
if [[ -f "${DEP_FILE}" ]] && grep -q 'Result: PASS' "${DEP_FILE}"; then
M017="100.00"
M017_NOTE="dependency audit result PASS"
fi
# M-018: share of PHASE steps marked PASS in the newest stage validation.
if [[ -f "${SP_FILE}" ]]; then
  # BUGFIX: the old `grep | wc -l` pipelines failed under pipefail when a
  # report had no matching rows, and set -e then aborted the whole script.
  # grep -c prints its count (0) even while exiting non-zero, so || true
  # is the only guard needed; it also never emits the padding wc can.
  total_steps="$(grep -c -E '^\| PHASE-' "${SP_FILE}" || true)"
  pass_steps="$(grep -c -E '^\| PHASE-[0-9]+ \| PASS \|' "${SP_FILE}" || true)"
  if [[ "${total_steps}" -gt 0 ]]; then
    M018="$(awk -v p="${pass_steps}" -v t="${total_steps}" 'BEGIN{printf "%.2f", (p/t)*100}')"
    M018_NOTE="pass_steps=${pass_steps}/${total_steps}"
  fi
fi
# M-019: share of traceability rows (header excluded) with all key
# columns (1, 3, 5, 6, 7) populated.
if [[ -f "${TRACE_FILE}" ]]; then
total_rows="$(awk -F',' 'NR>1{count++} END{print count+0}' "${TRACE_FILE}")"
tracked_rows="$(awk -F',' 'NR>1{if($1!="" && $3!="" && $5!="" && $6!="" && $7!="")count++} END{print count+0}' "${TRACE_FILE}")"
if [[ "${total_rows}" -gt 0 ]]; then
M019="$(awk -v t="${tracked_rows}" -v a="${total_rows}" 'BEGIN{printf "%.2f", (t/a)*100}')"
M019_NOTE="tracked_rows=${tracked_rows}/${total_rows}"
fi
fi
# Each metric must be exactly 100.00 to PASS. (The failing [[ ]] on the
# left of && does not trip set -e here: only the last command of a list
# following the final && would.)
M017_STATUS="PASS"; [[ "${M017}" != "100.00" ]] && M017_STATUS="FAIL"
M018_STATUS="PASS"; [[ "${M018}" != "100.00" ]] && M018_STATUS="FAIL"
M019_STATUS="PASS"; [[ "${M019}" != "100.00" ]] && M019_STATUS="FAIL"
# Seed the cumulative CSV with a header row on first run.
if [[ ! -f "${SNAPSHOT_CSV}" ]]; then
echo "date,m017,m018,m019,m017_status,m018_status,m019_status,dep_file,stage_file,trace_file" > "${SNAPSHOT_CSV}"
fi
# Rewrite the CSV idempotently: keep the header, drop any existing row
# for this date plus stray "*-debug" rows, then append today's row.
tmp_csv="$(mktemp)"
awk -F',' -v d="${DATE_TAG}" '
NR==1 {print; next}
$1==d {next}
$1 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}-debug$/ {next}
{print}
' "${SNAPSHOT_CSV}" > "${tmp_csv}"
echo "${DATE_TAG},${M017},${M018},${M019},${M017_STATUS},${M018_STATUS},${M019_STATUS},${DEP_FILE},${SP_FILE},${TRACE_FILE}" >> "${tmp_csv}"
mv "${tmp_csv}" "${SNAPSHOT_CSV}"
# Render the markdown snapshot for the day.
cat > "${SNAPSHOT_MD}" <<EOF
# 每日门禁指标快照(${DATE_TAG}
## 1. 指标结果
| 指标ID | 值 | 目标 | 结果 | 说明 |
|---|---:|---:|---|---|
| M-017 | ${M017}% | 100% | ${M017_STATUS} | ${M017_NOTE} |
| M-018 | ${M018}% | 100% | ${M018_STATUS} | ${M018_NOTE} |
| M-019 | ${M019}% | 100% | ${M019_STATUS} | ${M019_NOTE} |
## 2. 数据源
1. dependency${DEP_FILE:-N/A}
2. stage validation${SP_FILE:-N/A}
3. traceability matrix${TRACE_FILE:-N/A}
## 3. 快照存档
1. CSV\`${SNAPSHOT_CSV}\`
2. 日报:\`${SNAPSHOT_MD}\`
EOF
# Drift gate: any M-019 shortfall means requirement/design/test drift.
DRIFT_STATUS="PASS"
if [[ "${M019_STATUS}" != "PASS" ]]; then
DRIFT_STATUS="FAIL"
fi
cat > "${DRIFT_MD}" <<EOF
# 需求-设计-测试漂移日检(${DATE_TAG}
- 状态:**${DRIFT_STATUS}**
- 依据M-019=${M019}%(目标=100%
## 检查结论
1. 若 M-019 < 100%,判定存在追踪漂移风险。
2. 当前说明:${M019_NOTE}
## 处理动作
1. 若 FAIL24h 内补齐缺失追踪项并复跑本脚本。
2. 若 PASS纳入 7 日趋势统计。
EOF
echo "[PASS] daily snapshot generated: ${SNAPSHOT_MD}"
echo "[PASS] drift report generated: ${DRIFT_MD}"

View File

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
# Builds a 7-day trend report for M-017/M-018/M-019 from the cumulative
# snapshot CSV. Trend passes only after 7 consecutive all-PASS days.
# $1: end date (YYYY-MM-DD), defaults to today.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
END_DATE="${1:-$(date +%F)}"
OUT_DIR="${ROOT_DIR}/reports/gates"
SNAPSHOT_CSV="${OUT_DIR}/metrics_daily_snapshots.csv"
OUT_MD="${OUT_DIR}/metrics_trend_7d_${END_DATE}.md"
if [[ ! -f "${SNAPSHOT_CSV}" ]]; then
echo "[FAIL] missing snapshot csv: ${SNAPSHOT_CSV}"
exit 1
fi
# Working set: header + the 7 newest date-keyed rows (malformed date
# fields are filtered out before sorting).
# NOTE(review): rows are ordered by date string, so the "last 7" are the
# lexicographically newest dates regardless of END_DATE -- confirm
# whether rows after END_DATE should be excluded.
tmp_rows="$(mktemp)"
{
head -n 1 "${SNAPSHOT_CSV}"
tail -n +2 "${SNAPSHOT_CSV}" \
| awk -F',' '$1 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}$/' \
| sort -t, -k1,1 \
| tail -n 7
} > "${tmp_rows}"
data_count="$(tail -n +2 "${tmp_rows}" | wc -l | tr -d ' ')"
if [[ "${data_count}" -eq 0 ]]; then
echo "[FAIL] no snapshot rows found"
rm -f "${tmp_rows}"
exit 1
fi
# A day counts only when all three metric statuses are PASS.
all_pass_days="$(awk -F',' 'NR>1{if($5=="PASS" && $6=="PASS" && $7=="PASS")c++} END{print c+0}' "${tmp_rows}")"
trend_status="NOT_READY"
trend_note="need 7 all-pass days to satisfy continuous trend requirement"
if [[ "${data_count}" -ge 7 && "${all_pass_days}" -eq 7 ]]; then
trend_status="PASS_7D"
trend_note="7 consecutive days all PASS"
fi
# Render the markdown trend report.
{
echo "# M-017/M-018/M-019 7日趋势报告截至 ${END_DATE}"
echo
echo "## 1. 汇总"
echo
echo "- 采样天数:${data_count}"
echo "- 全通过天数:${all_pass_days}"
echo "- 趋势状态:**${trend_status}**"
echo "- 说明:${trend_note}"
echo
echo "## 2. 明细"
echo
echo "| 日期 | M-017 | M-018 | M-019 | M-017状态 | M-018状态 | M-019状态 |"
echo "|---|---:|---:|---:|---|---|---|"
awk -F',' 'NR>1{printf "| %s | %s%% | %s%% | %s%% | %s | %s | %s |\n",$1,$2,$3,$4,$5,$6,$7}' "${tmp_rows}"
echo
echo "## 3. 数据源"
echo
echo "1. \`${SNAPSHOT_CSV}\`"
} > "${OUT_MD}"
rm -f "${tmp_rows}"
echo "[PASS] trend report generated: ${OUT_MD}"

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# minimax_upstream_daily_snapshot.sh — summarize the most recent Minimax
# upstream smoke report into a daily CSV row plus a markdown snapshot.
# Usage: minimax_upstream_daily_snapshot.sh [DATE_TAG] [ENV_FILE]
#   RUN_ACTIVE_SMOKE=1 triggers a fresh live smoke run first; the default (0)
#   only aggregates the latest existing report (no external requests).
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
DATE_TAG="${1:-$(date +%F)}"
ENV_FILE_REL="${2:-scripts/supply-gate/.env.minimax-dev}"
if [[ "${ENV_FILE_REL}" == /* ]]; then
ENV_FILE="${ENV_FILE_REL}"
else
ENV_FILE="${ROOT_DIR}/${ENV_FILE_REL}"
fi
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
SNAPSHOT_CSV="${OUT_DIR}/minimax_upstream_daily_snapshots.csv"
SNAPSHOT_MD="${OUT_DIR}/minimax_upstream_daily_snapshot_${DATE_TAG}.md"
RUN_ACTIVE_SMOKE="${RUN_ACTIVE_SMOKE:-0}"
# Print the overall verdict from a smoke report's "- 总体结论:**X**" bullet;
# UNKNOWN when the file is missing, empty output when the bullet is absent.
extract_overall() {
local file="$1"
if [[ ! -f "${file}" ]]; then
echo "UNKNOWN"
return
fi
grep -E '^- 总体结论:\*\*' "${file}" | head -n 1 | sed -E 's/^- 总体结论:\*\*([^*]+)\*\*$/\1/' || true
}
# Print the newest smoke report; with choose_real_only=1 skip PASS_DRY_RUN
# reports, falling back to the newest of any kind when no real one exists.
# NOTE(review): iterates over unquoted `ls` output (assumes report paths have
# no whitespace) and `overall` is not declared local — it leaks globally.
find_latest_smoke_report() {
local choose_real_only="$1"
local candidate=""
local first_any=""
for candidate in $(ls -1t "${OUT_DIR}"/minimax_upstream_smoke_*.md 2>/dev/null || true); do
if [[ -z "${first_any}" ]]; then
first_any="${candidate}"
fi
if [[ "${choose_real_only}" == "1" ]]; then
overall="$(extract_overall "${candidate}")"
if [[ "${overall}" != "PASS_DRY_RUN" ]]; then
echo "${candidate}"
return
fi
else
echo "${candidate}"
return
fi
done
echo "${first_any}"
}
if [[ "${RUN_ACTIVE_SMOKE}" == "1" ]]; then
bash "${ROOT_DIR}/scripts/supply-gate/minimax_upstream_smoke.sh" "${ENV_FILE}"
fi
LATEST_REPORT="$(find_latest_smoke_report "1")"
if [[ -z "${LATEST_REPORT}" || ! -f "${LATEST_REPORT}" ]]; then
echo "[FAIL] no minimax smoke report found under ${OUT_DIR}"
exit 1
fi
OVERALL="$(extract_overall "${LATEST_REPORT}")"
# First "- http_code" bullet is assumed to be the base probe and the second
# the active probe — TODO confirm against minimax_upstream_smoke.sh output.
BASE_HTTP="$(grep -E '^- http_code' "${LATEST_REPORT}" | sed -n '1p' | sed -E 's/^- http_code([0-9]+)$/\1/' || true)"
ACTIVE_HTTP="$(grep -E '^- http_code' "${LATEST_REPORT}" | sed -n '2p' | sed -E 's/^- http_code([0-9]+)$/\1/' || true)"
if [[ -z "${OVERALL}" ]]; then
OVERALL="UNKNOWN"
fi
# Map the smoke verdict onto the snapshot status: dry-run counts only as
# CONDITIONAL_PASS, anything unrecognized counts as FAIL.
STATUS="FAIL"
if [[ "${OVERALL}" == "PASS" || "${OVERALL}" == "PASS_AUTH_REACHED" ]]; then
STATUS="PASS"
elif [[ "${OVERALL}" == "PASS_DRY_RUN" ]]; then
STATUS="CONDITIONAL_PASS"
fi
NOTE="latest_report=${LATEST_REPORT}"
if [[ "${RUN_ACTIVE_SMOKE}" != "1" ]]; then
NOTE="${NOTE}; run_active_smoke=0(use latest report only)"
fi
if [[ ! -f "${SNAPSHOT_CSV}" ]]; then
echo "date,status,overall,base_http,active_http,run_active_smoke,report,note" > "${SNAPSHOT_CSV}"
fi
# Rewrite the CSV dropping any existing row for DATE_TAG so reruns upsert
# rather than duplicate today's row.
tmp_csv="$(mktemp)"
awk -F',' -v d="${DATE_TAG}" '
NR==1 {print; next}
$1==d {next}
{print}
' "${SNAPSHOT_CSV}" > "${tmp_csv}"
echo "${DATE_TAG},${STATUS},${OVERALL},${BASE_HTTP:-N/A},${ACTIVE_HTTP:-N/A},${RUN_ACTIVE_SMOKE},${LATEST_REPORT},${NOTE}" >> "${tmp_csv}"
mv "${tmp_csv}" "${SNAPSHOT_CSV}"
{
echo "# Minimax 上游每日快照(${DATE_TAG}"
echo
echo "- 运行模式RUN_ACTIVE_SMOKE=${RUN_ACTIVE_SMOKE}"
echo "- 环境文件:\`${ENV_FILE_REL}\`"
echo "- 快照结果:**${STATUS}**"
echo "- overall\`${OVERALL}\`"
echo "- base_http\`${BASE_HTTP:-N/A}\`"
echo "- active_http\`${ACTIVE_HTTP:-N/A}\`"
echo "- 证据:\`${LATEST_REPORT}\`"
echo
echo "## 说明"
echo
echo "1. RUN_ACTIVE_SMOKE=0 时仅汇总最新 smoke 报告,不触发外部请求。"
echo "2. RUN_ACTIVE_SMOKE=1 时会执行一次实时 smoke并更新快照。"
echo "3. 该快照用于上游可达性监控,不替代 SUP 发布门禁结论。"
echo
echo "## 存档"
echo
echo "1. CSV\`${SNAPSHOT_CSV}\`"
echo "2. 日报:\`${SNAPSHOT_MD}\`"
} > "${SNAPSHOT_MD}"
echo "[PASS] minimax daily snapshot generated: ${SNAPSHOT_MD}"

View File

@@ -0,0 +1,75 @@
#!/usr/bin/env bash
# minimax_upstream_trend_report.sh — roll the last 7 Minimax upstream daily
# snapshot rows into a 7-day trend report (reachability monitoring only;
# explicitly not a SUP release-gate verdict).
# Usage: minimax_upstream_trend_report.sh [END_DATE]
#   END_DATE (default: today) is only used to name the output file.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
END_DATE="${1:-$(date +%F)}"
OUT_DIR="${ROOT_DIR}/reports/gates"
SNAPSHOT_CSV="${OUT_DIR}/minimax_upstream_daily_snapshots.csv"
OUT_MD="${OUT_DIR}/minimax_upstream_trend_7d_${END_DATE}.md"
if [[ ! -f "${SNAPSHOT_CSV}" ]]; then
echo "[FAIL] missing minimax snapshot csv: ${SNAPSHOT_CSV}"
exit 1
fi
# Keep the header plus the 7 most recent date-stamped rows.
tmp_rows="$(mktemp)"
{
head -n 1 "${SNAPSHOT_CSV}"
tail -n +2 "${SNAPSHOT_CSV}" \
| awk -F',' '$1 ~ /^[0-9]{4}-[0-9]{2}-[0-9]{2}$/' \
| sort -t, -k1,1 \
| tail -n 7
} > "${tmp_rows}"
data_count="$(tail -n +2 "${tmp_rows}" | wc -l | tr -d ' ')"
if [[ "${data_count}" -eq 0 ]]; then
echo "[FAIL] no minimax snapshot rows found"
rm -f "${tmp_rows}"
exit 1
fi
# Column 2 is the per-day status written by the daily snapshot script.
pass_days="$(awk -F',' 'NR>1 && $2=="PASS"{c++} END{print c+0}' "${tmp_rows}")"
conditional_days="$(awk -F',' 'NR>1 && $2=="CONDITIONAL_PASS"{c++} END{print c+0}' "${tmp_rows}")"
fail_days="$(awk -F',' 'NR>1 && $2=="FAIL"{c++} END{print c+0}' "${tmp_rows}")"
# Trend classification: PASS_7D (7/7 PASS) > CONDITIONAL_7D (no FAIL but some
# conditional days) > NOT_READY; fewer than 7 rows -> INSUFFICIENT_DATA.
trend_status="INSUFFICIENT_DATA"
trend_note="less than 7 days of minimax snapshots"
if [[ "${data_count}" -ge 7 ]]; then
trend_status="NOT_READY"
trend_note="need 7 PASS days to mark stable upstream trend"
if [[ "${pass_days}" -eq 7 ]]; then
trend_status="PASS_7D"
trend_note="7 consecutive PASS days reached"
elif [[ "${fail_days}" -eq 0 && "${conditional_days}" -gt 0 ]]; then
trend_status="CONDITIONAL_7D"
trend_note="no FAIL but contains CONDITIONAL_PASS days"
fi
fi
{
echo "# Minimax 上游 7 日趋势报告(截至 ${END_DATE}"
echo
echo "## 1. 汇总"
echo
echo "- 采样天数:${data_count}"
echo "- PASS 天数:${pass_days}"
echo "- CONDITIONAL_PASS 天数:${conditional_days}"
echo "- FAIL 天数:${fail_days}"
echo "- 趋势状态:**${trend_status}**"
echo "- 说明:${trend_note}"
echo
echo "## 2. 明细"
echo
echo "| 日期 | 状态 | overall | base_http | active_http | run_active_smoke | 报告 |"
echo "|---|---|---|---:|---:|---:|---|"
awk -F',' 'NR>1{printf "| %s | %s | %s | %s | %s | %s | %s |\n",$1,$2,$3,$4,$5,$6,$7}' "${tmp_rows}"
echo
echo "## 3. 数据源"
echo
echo "1. \`${SNAPSHOT_CSV}\`"
echo "2. 本报告仅用于 Minimax 上游可达性趋势,不替代 SUP 发布门禁结论。"
} > "${OUT_MD}"
rm -f "${tmp_rows}"
echo "[PASS] minimax trend report generated: ${OUT_MD}"

59
scripts/ci/stage-gate-drill.sh Executable file
View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
set -euo pipefail

# Stage-gate rollback drill: walk the promotion stages G0..G5, inject a
# simulated quality-gate failure at FAIL_STAGE (arg 1, default G3), and log
# the resulting rollback decision to reports/gates/stage_gate_drill_<date>.log.
# Exits 2 when FAIL_STAGE matches no known stage (drill considered invalid).
FAIL_STAGE="${1:-G3}"
DATE_TAG="${2:-$(date +%F)}"
PROJECT_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
OUT_DIR="$PROJECT_ROOT/reports/gates"
mkdir -p "$OUT_DIR"
LOG_FILE="$OUT_DIR/stage_gate_drill_${DATE_TAG}.log"
stages=(G0 G1 G2 G3 G4 G5)
: > "$LOG_FILE"

# Echo to stdout while appending to the drill log.
log() {
  echo "$1" | tee -a "$LOG_FILE"
}

log "[INFO] stage gate drill start, fail_stage=$FAIL_STAGE, date=$DATE_TAG"
pass_count=0
failed=0
failed_stage=""
rollback_to=""
for stage in "${stages[@]}"; do
  if [[ "$stage" != "$FAIL_STAGE" ]]; then
    log "[PASS] $stage quality gate check passed"
    pass_count=$((pass_count+1))
    continue
  fi
  log "[FAIL] $stage quality gate check failed: simulated contract drift"
  failed=1
  failed_stage="$stage"
  break
done
if [[ $failed -eq 0 ]]; then
  log "[INFO] no failure injected; drill considered invalid"
  exit 2
fi
# Roll back to the stage immediately before the failed one; G0 rolls back to
# itself (it has no predecessor).
rollback_to="G0"
for idx in "${!stages[@]}"; do
  if [[ "${stages[$idx]}" == "$failed_stage" ]]; then
    if [[ $idx -gt 0 ]]; then
      rollback_to="${stages[$((idx-1))]}"
    fi
    break
  fi
done
log "[ACTION] rollback triggered: from $failed_stage to $rollback_to"
log "[ACTION] freeze subsequent promotion stages"
log "[ACTION] open corrective task with 24h SLA"
log "[PASS] rollback drill complete"
echo "LOG_FILE=$LOG_FILE"
echo "PASS_COUNT=$pass_count"
echo "FAILED_STAGE=$failed_stage"
echo "ROLLBACK_TO=$rollback_to"
View File

@@ -0,0 +1,280 @@
#!/usr/bin/env bash
# staging_evidence_autofill.sh — collect the latest staging/TOK/SEC evidence
# artifacts and auto-fill a draft markdown summary for the GO/NO-GO review.
# Output defaults to reports/gates/staging_token_go_evidence_autofill_<ts>.md.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
OUT_DIR="${ROOT_DIR}/reports/gates"
TS="$(date +%F_%H%M%S)"
OUT_FILE="${OUT_DIR}/staging_token_go_evidence_autofill_${TS}.md"
LOG_FILE="${OUT_DIR}/staging_token_go_evidence_autofill_${TS}.log"
mkdir -p "${OUT_DIR}"
# Print CLI help to stdout. The heredoc delimiter is quoted, so nothing inside
# is expanded; the option text is emitted literally.
usage() {
cat <<'EOF'
Usage:
bash scripts/ci/staging_evidence_autofill.sh [options]
Options:
--staging-run-log <path> 指定 staging_run_*.log
--stage-report <path> 指定 superpowers_stage_validation_*.md
--token-readiness <path> 指定 token_runtime_readiness_*.md
--tok007-report <path> 指定 tok007_release_recheck_*.md
--pipeline-report <path> 指定 superpowers_release_pipeline_*.md
--sec-report <path> 指定 sec_sup_boundary_report_*.md
--out-file <path> 指定输出 markdown 文件路径
-h, --help 查看帮助
EOF
}
# Turn a possibly-relative path into an absolute one rooted at ROOT_DIR.
# Empty input passes through unchanged so unset optional args stay empty.
resolve_path() {
  local raw="$1"
  if [[ -z "${raw}" ]]; then
    echo ""
    return
  fi
  case "${raw}" in
    /*) echo "${raw}" ;;
    *) echo "${ROOT_DIR}/${raw}" ;;
  esac
}
# Abort the script (exit 1, usage on stderr) when an option that requires a
# value was given none; returns silently when the value is present.
require_arg() {
  local option_name="$1"
  local option_value="${2:-}"
  if [[ -n "${option_value}" ]]; then
    return
  fi
  echo "[FAIL] missing value for ${option_name}" >&2
  usage >&2
  exit 1
}
# Expand a glob pattern (deliberately unquoted) and print the newest matching
# file by mtime, or an empty line when nothing matches.
latest_file_or_empty() {
  local glob="$1"
  local newest
  # shellcheck disable=SC2086  -- word-splitting/globbing is the point here
  newest="$(ls -1t ${glob} 2>/dev/null | head -n 1 || true)"
  echo "${newest}"
}
# Pull the status column for one phase row (e.g. PHASE-07) out of a markdown
# table whose rows look like "| PHASE-07 | DEFERRED | ... |".
# Prints N/A when the file is missing or the phase row is absent.
extract_phase_status() {
  local report_file="$1"
  local phase_id="$2"
  [[ -f "${report_file}" ]] || { echo "N/A"; return; }
  awk -F'|' -v want="${phase_id}" '
    {
      id_col = $2
      gsub(/^ +| +$/, "", id_col)
      if (id_col == want) {
        status_col = $3
        gsub(/^ +| +$/, "", status_col)
        print status_col
        hit = 1
        exit
      }
    }
    END { if (!hit) print "N/A" }
  ' "${report_file}"
}
# Pull the value column for one metric row (e.g. M-013) out of the security
# boundary report's markdown table ("| M-013 | 100% | ... |").
# Prints N/A when the file is missing or the metric row is absent.
extract_metric_from_sec_report() {
  local report_file="$1"
  local metric_key="$2"
  [[ -f "${report_file}" ]] || { echo "N/A"; return; }
  awk -F'|' -v key="${metric_key}" '
    {
      id_col = $2
      gsub(/^ +| +$/, "", id_col)
      if (id_col == key) {
        value_col = $3
        gsub(/^ +| +$/, "", value_col)
        print value_col
        hit = 1
        exit
      }
    }
    END { if (!hit) print "N/A" }
  ' "${report_file}"
}
extract_m021_value() {
local file="$1"
if [[ ! -f "${file}" ]]; then
echo "N/A"
return
fi
local row
row="$(grep -E '^- 数值:' "${file}" | head -n 1 || true)"
if [[ -z "${row}" ]]; then
echo "N/A"
return
fi
echo "${row#- 数值:}"
}
# Classify the first "- 结果:**X**" bullet of the token readiness report as
# PASS or FAIL (PASS is checked first), printing N/A when the file or the
# bullet is missing, or when the bullet mentions neither token.
extract_m021_result() {
  local readiness_file="$1"
  [[ -f "${readiness_file}" ]] || { echo "N/A"; return; }
  local result_line
  result_line="$(grep -E '^- 结果:\*\*' "${readiness_file}" | head -n 1 || true)"
  if [[ -z "${result_line}" ]]; then
    echo "N/A"
    return
  fi
  case "${result_line}" in
    *PASS*) echo "PASS" ;;
    *FAIL*) echo "FAIL" ;;
    *) echo "N/A" ;;
  esac
}
# Print the machine verdict recorded as "- 机判结论:**X**" in the TOK-007
# recheck report, or N/A when the file or the bullet is missing.
extract_tok007_machine_decision() {
  local recheck_file="$1"
  [[ -f "${recheck_file}" ]] || { echo "N/A"; return; }
  local decision_line
  decision_line="$(grep -E '^- 机判结论:\*\*' "${recheck_file}" | head -n 1 || true)"
  if [[ -z "${decision_line}" ]]; then
    echo "N/A"
    return
  fi
  printf '%s\n' "${decision_line}" | sed -E 's/^- 机判结论:\*\*([^*]+)\*\*$/\1/'
}
# Main flow: parse CLI overrides, default any unset input to the newest
# matching artifact, extract the headline values, and write the draft report.
STAGING_RUN_LOG=""
SP_REPORT=""
TOK021_REPORT=""
TOK007_REPORT=""
PIPELINE_REPORT=""
SEC_REPORT="${ROOT_DIR}/tests/supply/sec_sup_boundary_report_2026-03-30.md"
while [[ $# -gt 0 ]]; do
case "$1" in
--staging-run-log)
require_arg "$1" "${2:-}"
STAGING_RUN_LOG="$(resolve_path "$2")"
shift 2
;;
--stage-report)
require_arg "$1" "${2:-}"
SP_REPORT="$(resolve_path "$2")"
shift 2
;;
--token-readiness)
require_arg "$1" "${2:-}"
TOK021_REPORT="$(resolve_path "$2")"
shift 2
;;
--tok007-report)
require_arg "$1" "${2:-}"
TOK007_REPORT="$(resolve_path "$2")"
shift 2
;;
--pipeline-report)
require_arg "$1" "${2:-}"
PIPELINE_REPORT="$(resolve_path "$2")"
shift 2
;;
--sec-report)
require_arg "$1" "${2:-}"
SEC_REPORT="$(resolve_path "$2")"
shift 2
;;
--out-file)
require_arg "$1" "${2:-}"
OUT_FILE="$(resolve_path "$2")"
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "[FAIL] unknown arg: $1" >&2
usage >&2
exit 1
;;
esac
done
# Fall back to the newest matching artifact for any input not given on the CLI.
if [[ -z "${STAGING_RUN_LOG}" ]]; then
STAGING_RUN_LOG="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/staging_run_*.log")"
fi
if [[ -z "${SP_REPORT}" ]]; then
SP_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/superpowers_stage_validation_*.md")"
fi
if [[ -z "${TOK021_REPORT}" ]]; then
TOK021_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/token_runtime_readiness_*.md")"
fi
if [[ -z "${TOK007_REPORT}" ]]; then
TOK007_REPORT="$(latest_file_or_empty "${ROOT_DIR}/review/outputs/tok007_release_recheck_*.md")"
fi
if [[ -z "${PIPELINE_REPORT}" ]]; then
PIPELINE_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/superpowers_release_pipeline_*.md")"
fi
# (LOG_FILE was already set in the preamble; the redundant re-assignment that
# used to sit here has been removed.)
PHASE07="$(extract_phase_status "${SP_REPORT}" "PHASE-07")"
M013="$(extract_metric_from_sec_report "${SEC_REPORT}" "M-013")"
M014="$(extract_metric_from_sec_report "${SEC_REPORT}" "M-014")"
M015="$(extract_metric_from_sec_report "${SEC_REPORT}" "M-015")"
M016="$(extract_metric_from_sec_report "${SEC_REPORT}" "M-016")"
M021_VALUE="$(extract_m021_value "${TOK021_REPORT}")"
M021_RESULT="$(extract_m021_result "${TOK021_REPORT}")"
TOK007_DECISION="$(extract_tok007_machine_decision "${TOK007_REPORT}")"
{
echo "# Staging 联调证据自动回填草稿"
echo
echo "- 生成时间:${TS}"
echo "- 生成脚本:\`scripts/ci/staging_evidence_autofill.sh\`"
echo
echo "## 1. 自动抽取结果"
echo
echo "| 项目 | 自动值 | 来源 |"
echo "|---|---|---|"
echo "| PHASE-07 | ${PHASE07} | ${SP_REPORT:-N/A} |"
echo "| M-013 | ${M013} | ${SEC_REPORT} |"
echo "| M-014 | ${M014} | ${SEC_REPORT} |"
echo "| M-015 | ${M015} | ${SEC_REPORT} |"
echo "| M-016 | ${M016} | ${SEC_REPORT} |"
echo "| M-021 | ${M021_VALUE} | ${TOK021_REPORT:-N/A} |"
echo "| M-021结果 | ${M021_RESULT} | ${TOK021_REPORT:-N/A} |"
echo "| TOK-007 机判 | ${TOK007_DECISION} | ${TOK007_REPORT:-N/A} |"
echo
echo "## 2. 证据路径清单"
echo
echo "1. staging run${STAGING_RUN_LOG:-N/A}"
echo "2. stage validate${SP_REPORT:-N/A}"
echo "3. token readiness${TOK021_REPORT:-N/A}"
echo "4. tok007 recheck${TOK007_REPORT:-N/A}"
echo "5. release pipeline${PIPELINE_REPORT:-N/A}"
echo "6. security boundary${SEC_REPORT}"
echo
echo "## 3. 人工确认项"
echo
echo "1. 若 PHASE-07 仍为 DEFERRED禁止将结论上调为 GO。"
echo "2. 若 M-013~M-016 来源为 mock必须在 staging 复测后覆盖。"
echo "3. 若 M-021 仅为开发阶段口径,需在 staging 复跑后再次回填。"
} > "${OUT_FILE}"
{
echo "[INFO] output=${OUT_FILE}"
echo "[INFO] PHASE-07=${PHASE07}, M021_RESULT=${M021_RESULT}, TOK007=${TOK007_DECISION}"
echo "[RESULT] PASS"
} | tee -a "${LOG_FILE}"

View File

@@ -0,0 +1,162 @@
#!/usr/bin/env bash
# staging_real_readiness.sh — decide whether a real (non-mock) staging
# validation can start: checks env file presence, API_BASE_URL shape and
# reachability, and owner/viewer/admin token configuration. Writes a markdown
# report under reports/gates/ and exits 1 unless the result is READY.
# Usage: staging_real_readiness.sh [ENV_FILE]
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
ENV_REL="${1:-scripts/supply-gate/.env.staging-real}"
if [[ "${ENV_REL}" == /* ]]; then
ENV_FILE="${ENV_REL}"
else
ENV_FILE="${ROOT_DIR}/${ENV_REL}"
fi
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
TS="$(date +%F_%H%M%S)"
REPORT_FILE="${OUT_DIR}/staging_real_readiness_${TS}.md"
LOG_FILE="${OUT_DIR}/staging_real_readiness_${TS}.log"
# Parallel arrays holding one row per check (id/status/description/evidence).
CHECK_IDS=()
CHECK_STATUS=()
CHECK_DESC=()
CHECK_EVIDENCE=()
# Record one check row: add_check ID STATUS DESCRIPTION EVIDENCE.
add_check() {
CHECK_IDS+=("$1")
CHECK_STATUS+=("$2")
CHECK_DESC+=("$3")
CHECK_EVIDENCE+=("$4")
}
# Append to the log file only (stdout suppressed).
# NOTE(review): no call sites are visible in this script — the later code uses
# echo|tee directly; confirm whether log() is dead code before removing.
log() {
echo "$1" | tee -a "${LOG_FILE}" >/dev/null
}
if [[ ! -f "${ENV_FILE}" ]]; then
add_check "STG-RDY-001" "FAIL" "环境文件存在" "${ENV_FILE} (missing)"
else
add_check "STG-RDY-001" "PASS" "环境文件存在" "${ENV_FILE}"
fi
# Missing env file short-circuits: emit a BLOCKED report and stop.
if [[ ! -f "${ENV_FILE}" ]]; then
{
echo "# 真实 STG 就绪度检查"
echo
echo "- 时间戳:${TS}"
echo "- 输入环境:\`${ENV_REL}\`"
echo "- 结果:**BLOCKED**"
echo
echo "| 检查项 | 结果 | 说明 | 证据 |"
echo "|---|---|---|---|"
for i in "${!CHECK_IDS[@]}"; do
echo "| ${CHECK_IDS[$i]} | ${CHECK_STATUS[$i]} | ${CHECK_DESC[$i]} | ${CHECK_EVIDENCE[$i]} |"
done
} > "${REPORT_FILE}"
echo "[RESULT] BLOCKED" | tee -a "${LOG_FILE}" >/dev/null
echo "[INFO] report=${REPORT_FILE}"
exit 1
fi
# shellcheck disable=SC1090
source "${ENV_FILE}"
API_BASE_URL_VALUE="${API_BASE_URL:-}"
OWNER_TOKEN_VALUE="${OWNER_BEARER_TOKEN:-}"
VIEWER_TOKEN_VALUE="${VIEWER_BEARER_TOKEN:-}"
ADMIN_TOKEN_VALUE="${ADMIN_BEARER_TOKEN:-}"
# STG-RDY-002/003/004: URL configured, not a placeholder, not a local address.
if [[ -n "${API_BASE_URL_VALUE}" ]]; then
add_check "STG-RDY-002" "PASS" "API_BASE_URL 已配置" "${API_BASE_URL_VALUE}"
else
add_check "STG-RDY-002" "FAIL" "API_BASE_URL 已配置" "empty"
fi
if [[ "${API_BASE_URL_VALUE}" == *"staging.example.com"* ]]; then
add_check "STG-RDY-003" "FAIL" "API_BASE_URL 非占位值" "${API_BASE_URL_VALUE}"
elif [[ -n "${API_BASE_URL_VALUE}" ]]; then
add_check "STG-RDY-003" "PASS" "API_BASE_URL 非占位值" "${API_BASE_URL_VALUE}"
else
add_check "STG-RDY-003" "FAIL" "API_BASE_URL 非占位值" "empty"
fi
if echo "${API_BASE_URL_VALUE}" | grep -Eiq '127\.0\.0\.1|localhost'; then
add_check "STG-RDY-004" "FAIL" "API_BASE_URL 为真实外网 STG 地址" "${API_BASE_URL_VALUE} (local)"
else
add_check "STG-RDY-004" "PASS" "API_BASE_URL 为真实外网 STG 地址" "${API_BASE_URL_VALUE}"
fi
# STG-RDY-005/006: all three role tokens present and none a known placeholder.
if [[ -n "${OWNER_TOKEN_VALUE}" && -n "${VIEWER_TOKEN_VALUE}" && -n "${ADMIN_TOKEN_VALUE}" ]]; then
add_check "STG-RDY-005" "PASS" "owner/viewer/admin token 已配置" "all present"
else
add_check "STG-RDY-005" "FAIL" "owner/viewer/admin token 已配置" "missing one or more token"
fi
has_placeholder=0
for t in "${OWNER_TOKEN_VALUE}" "${VIEWER_TOKEN_VALUE}" "${ADMIN_TOKEN_VALUE}"; do
if [[ "${t}" == replace-me-* || "${t}" == placeholder* || -z "${t}" ]]; then
has_placeholder=1
break
fi
done
if [[ "${has_placeholder}" == "1" ]]; then
add_check "STG-RDY-006" "FAIL" "token 非占位值" "placeholder/empty detected"
else
add_check "STG-RDY-006" "PASS" "token 非占位值" "ok"
fi
# STG-RDY-007 is advisory only: identical tokens yield WARN, never FAIL.
if [[ "${OWNER_TOKEN_VALUE}" == "${VIEWER_TOKEN_VALUE}" || "${OWNER_TOKEN_VALUE}" == "${ADMIN_TOKEN_VALUE}" || "${VIEWER_TOKEN_VALUE}" == "${ADMIN_TOKEN_VALUE}" ]]; then
add_check "STG-RDY-007" "WARN" "三类 token 建议区分角色" "at least two tokens are identical"
else
add_check "STG-RDY-007" "PASS" "三类 token 建议区分角色" "distinct tokens"
fi
# STG-RDY-008: HEAD-probe the URL; curl prints 000 when unreachable.
# NOTE(review): if curl itself is absent, reachable_status ends up empty (not
# "000") and the check would PASS with an empty code — confirm curl is assumed.
reachable_status="000"
if [[ -n "${API_BASE_URL_VALUE}" ]]; then
reachable_status="$(curl -sS -m 5 -o /dev/null -w "%{http_code}" -I "${API_BASE_URL_VALUE}" 2>/dev/null || true)"
fi
if [[ "${reachable_status}" == "000" ]]; then
add_check "STG-RDY-008" "FAIL" "API_BASE_URL 可达性" "http_code=000"
else
add_check "STG-RDY-008" "PASS" "API_BASE_URL 可达性" "http_code=${reachable_status}"
fi
# Any FAIL row blocks readiness; WARN rows do not.
has_fail=0
for s in "${CHECK_STATUS[@]}"; do
if [[ "${s}" == "FAIL" ]]; then
has_fail=1
break
fi
done
RESULT="READY"
NOTE="all required checks passed"
if [[ "${has_fail}" == "1" ]]; then
RESULT="BLOCKED"
NOTE="at least one required check failed"
fi
{
echo "# 真实 STG 就绪度检查"
echo
echo "- 时间戳:${TS}"
echo "- 输入环境:\`${ENV_REL}\`"
echo "- 结果:**${RESULT}**"
echo "- 说明:${NOTE}"
echo
echo "| 检查项 | 结果 | 说明 | 证据 |"
echo "|---|---|---|---|"
for i in "${!CHECK_IDS[@]}"; do
echo "| ${CHECK_IDS[$i]} | ${CHECK_STATUS[$i]} | ${CHECK_DESC[$i]} | ${CHECK_EVIDENCE[$i]} |"
done
echo
echo "## 结论"
echo
echo "1. 该检查用于判定“是否具备真实 STG 放行验证前提”。"
echo "2. 若结果为 BLOCKED不应执行真实放行口径判定。"
} > "${REPORT_FILE}"
echo "[INFO] report=${REPORT_FILE}" | tee -a "${LOG_FILE}" >/dev/null
echo "[RESULT] ${RESULT}" | tee -a "${LOG_FILE}" >/dev/null
if [[ "${RESULT}" != "READY" ]]; then
exit 1
fi

View File

@@ -0,0 +1,189 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
ENV_FILE_REL="${1:-scripts/supply-gate/.env}"
if [[ "${ENV_FILE_REL}" == /* ]]; then
ENV_FILE="${ENV_FILE_REL}"
else
ENV_FILE="${ROOT_DIR}/${ENV_FILE_REL}"
fi
TS="$(date +%F_%H%M%S)"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
REPORT_FILE="${OUT_DIR}/staging_release_pipeline_${TS}.md"
LOG_FILE="${OUT_DIR}/staging_release_pipeline_${TS}.log"
ALLOW_LOCAL_MOCK_STAGING="${ALLOW_LOCAL_MOCK_STAGING:-0}"
log() {
echo "$1" | tee -a "${LOG_FILE}"
}
latest_file_or_empty() {
local pattern="$1"
local latest
latest="$(ls -1t ${pattern} 2>/dev/null | head -n 1 || true)"
echo "${latest}"
}
read_env_api_base_url() {
local env_path="$1"
grep -E '^API_BASE_URL=' "${env_path}" | head -n 1 | cut -d'=' -f2- | tr -d '\"' || true
}
is_mock_staging_env() {
local env_path="$1"
if echo "${env_path}" | grep -Eiq 'local-mock'; then
return 0
fi
if [[ ! -f "${env_path}" ]]; then
return 1
fi
local api_base
api_base="$(read_env_api_base_url "${env_path}")"
if echo "${api_base}" | grep -Eiq '127\.0\.0\.1|localhost|staging\.example\.com'; then
return 0
fi
return 1
}
if [[ ! -f "${ENV_FILE}" ]]; then
log "[FAIL] env file not found: ${ENV_FILE}"
exit 1
fi
MOCK_SERVER_PID=""
ENV_CLASSIFICATION="REAL_STAGING"
if is_mock_staging_env "${ENV_FILE}"; then
ENV_CLASSIFICATION="LOCAL_MOCK"
if [[ "${ALLOW_LOCAL_MOCK_STAGING}" != "1" ]]; then
log "[FAIL] local/mock env detected (${ENV_FILE_REL})."
log "[FAIL] for safety, set ALLOW_LOCAL_MOCK_STAGING=1 to run this rehearsal explicitly."
exit 1
fi
log "[WARN] local/mock env acknowledged by ALLOW_LOCAL_MOCK_STAGING=1; result cannot be used as real staging evidence."
fi
if [[ "${ENV_CLASSIFICATION}" == "LOCAL_MOCK" ]]; then
API_BASE_URL="$(read_env_api_base_url "${ENV_FILE}")"
if [[ -n "${API_BASE_URL}" ]] && echo "${API_BASE_URL}" | grep -Eiq '127\.0\.0\.1|localhost'; then
if ! curl -sS -m 2 -I "${API_BASE_URL}" >/dev/null 2>&1; then
log "[INFO] local/mock API unreachable, starting mock server for rehearsal."
nohup python3 "${ROOT_DIR}/scripts/mock/supply_gateway_mock_server.py" \
> "${OUT_DIR}/staging_mock_server_${TS}.log" 2>&1 &
MOCK_SERVER_PID=$!
for _ in {1..20}; do
if curl -sS -m 2 -I "${API_BASE_URL}" >/dev/null 2>&1; then
break
fi
sleep 0.2
done
if ! curl -sS -m 2 -I "${API_BASE_URL}" >/dev/null 2>&1; then
log "[FAIL] cannot start local/mock server for ${API_BASE_URL}"
exit 1
fi
log "[INFO] local/mock server started pid=${MOCK_SERVER_PID}"
trap 'kill "${MOCK_SERVER_PID}" >/dev/null 2>&1 || true' EXIT
else
log "[INFO] local/mock API already reachable: ${API_BASE_URL}"
fi
fi
fi
STEP_RESULTS=()
run_step() {
local step_id="$1"
local title="$2"
local cmd="$3"
local out_file="${OUT_DIR}/${step_id,,}_${TS}.out.log"
log "[INFO] ${step_id} ${title} start"
set +e
bash -lc "${cmd}" > "${out_file}" 2>&1
local rc=$?
set -e
if [[ ${rc} -eq 0 ]]; then
STEP_RESULTS+=("${step_id}|PASS|${title}|${out_file}")
log "[PASS] ${step_id} rc=${rc}"
else
STEP_RESULTS+=("${step_id}|FAIL|${title}|${out_file}")
log "[FAIL] ${step_id} rc=${rc}"
fi
}
run_step \
"STEP-01" \
"Staging precheck and run_all" \
"cd \"${ROOT_DIR}\" && bash \"scripts/supply-gate/staging_precheck_and_run.sh\" \"${ENV_FILE}\""
run_step \
"STEP-02" \
"Superpowers release pipeline with staging env" \
"cd \"${ROOT_DIR}\" && STAGING_ENV_FILE=\"${ENV_FILE_REL}\" bash \"scripts/ci/superpowers_release_pipeline.sh\""
LATEST_STAGING_RUN_LOG="$(latest_file_or_empty "${OUT_DIR}/staging_run_*.log")"
LATEST_STAGE_REPORT="$(latest_file_or_empty "${OUT_DIR}/superpowers_stage_validation_*.md")"
LATEST_TOKEN_READINESS="$(latest_file_or_empty "${OUT_DIR}/token_runtime_readiness_*.md")"
LATEST_TOK007_REPORT="$(latest_file_or_empty "${ROOT_DIR}/review/outputs/tok007_release_recheck_*.md")"
LATEST_PIPELINE_REPORT="$(latest_file_or_empty "${OUT_DIR}/superpowers_release_pipeline_*.md")"
SEC_REPORT="${ROOT_DIR}/tests/supply/sec_sup_boundary_report_2026-03-30.md"
run_step \
"STEP-03" \
"Staging evidence autofill" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/staging_evidence_autofill.sh\" \
--staging-run-log \"${LATEST_STAGING_RUN_LOG}\" \
--stage-report \"${LATEST_STAGE_REPORT}\" \
--token-readiness \"${LATEST_TOKEN_READINESS}\" \
--tok007-report \"${LATEST_TOK007_REPORT}\" \
--pipeline-report \"${LATEST_PIPELINE_REPORT}\" \
--sec-report \"${SEC_REPORT}\""
HAS_FAIL=0
for row in "${STEP_RESULTS[@]}"; do
status="$(echo "${row}" | awk -F'|' '{print $2}')"
if [[ "${status}" == "FAIL" ]]; then
HAS_FAIL=1
fi
done
RESULT="PASS"
NOTE="all steps finished"
if [[ "${HAS_FAIL}" -eq 1 ]]; then
RESULT="FAIL"
NOTE="at least one step failed"
fi
{
echo "# Staging 发布流水报告"
echo
echo "- 时间戳:${TS}"
echo "- 执行脚本:\`scripts/ci/staging_release_pipeline.sh\`"
echo "- 环境文件:\`${ENV_FILE_REL}\`"
echo "- 环境分类:\`${ENV_CLASSIFICATION}\`"
echo "- local/mock 显式确认:\`${ALLOW_LOCAL_MOCK_STAGING}\`"
echo "- 结果:**${RESULT}**"
echo "- 说明:${NOTE}"
echo
echo "## 步骤结果"
echo
echo "| 步骤 | 结果 | 说明 | 证据 |"
echo "|---|---|---|---|"
for row in "${STEP_RESULTS[@]}"; do
step_id="$(echo "${row}" | awk -F'|' '{print $1}')"
status="$(echo "${row}" | awk -F'|' '{print $2}')"
title="$(echo "${row}" | awk -F'|' '{print $3}')"
evidence="$(echo "${row}" | awk -F'|' '{print $4}')"
echo "| ${step_id} | ${status} | ${title} | ${evidence} |"
done
} > "${REPORT_FILE}"
log "[INFO] report=${REPORT_FILE}"
log "[RESULT] ${RESULT}"
if [[ "${RESULT}" == "FAIL" ]]; then
exit 1
fi

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# superpowers_release_pipeline.sh — run the mandatory release validation steps
# (stage validation, TOK-007 recheck, consistency check, final-decision
# candidate generation) plus an optional non-blocking Minimax upstream
# monitoring step, then write a markdown pipeline report to reports/gates/.
# Exits 1 when any mandatory step fails; SKIP/WARN never block.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
TS="$(date +%F_%H%M%S)"
TODAY_TAG="$(date +%F)"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
LOG_FILE="${OUT_DIR}/superpowers_release_pipeline_${TS}.log"
REPORT_FILE="${OUT_DIR}/superpowers_release_pipeline_${TS}.md"
# Optional Minimax monitoring knobs (all default to off / dev env file).
ENABLE_MINIMAX_MONITORING="${ENABLE_MINIMAX_MONITORING:-0}"
MINIMAX_ENV_FILE="${MINIMAX_ENV_FILE:-scripts/supply-gate/.env.minimax-dev}"
MINIMAX_RUN_ACTIVE_SMOKE="${MINIMAX_RUN_ACTIVE_SMOKE:-0}"
# Echo to stdout and append to the pipeline log.
log() {
echo "$1" | tee -a "${LOG_FILE}"
}
STEP_RESULTS=()
# run_step STEP_ID TITLE CMD — run one mandatory step with set -e temporarily
# relaxed, capture its output to a per-step log, and record
# "id|PASS/FAIL|title|evidence" in STEP_RESULTS.
# Note on STEP-03: the consistency check signals WARN via exit 0, so any
# non-zero status reaching this function means a parse/execution failure and
# is treated as FAIL like every other step. (The previous special-case branch
# for STEP-03 was byte-identical to the default branch and has been removed.)
run_step() {
local step_id="$1"
local title="$2"
local cmd="$3"
log "[INFO] ${step_id} ${title} start"
set +e
bash -lc "${cmd}" > "${OUT_DIR}/${step_id,,}_${TS}.out.log" 2>&1
local rc=$?
set -e
local evidence="${OUT_DIR}/${step_id,,}_${TS}.out.log"
if [[ "${rc}" -eq 0 ]]; then
log "[PASS] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|PASS|${title}|${evidence}")
else
log "[FAIL] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|FAIL|${title}|${evidence}")
fi
}
# run_optional_step_non_blocking STEP_ID TITLE ENABLED CMD — like run_step but
# records SKIP when disabled and downgrades failures to WARN so an optional
# monitoring step can never block the release pipeline.
run_optional_step_non_blocking() {
local step_id="$1"
local title="$2"
local enabled="$3"
local cmd="$4"
if [[ "${enabled}" != "1" ]]; then
log "[SKIP] ${step_id} not enabled"
STEP_RESULTS+=("${step_id}|SKIP|${title}|not enabled")
return
fi
log "[INFO] ${step_id} ${title} start"
set +e
bash -lc "${cmd}" > "${OUT_DIR}/${step_id,,}_${TS}.out.log" 2>&1
local rc=$?
set -e
local evidence="${OUT_DIR}/${step_id,,}_${TS}.out.log"
if [[ "${rc}" -eq 0 ]]; then
log "[PASS] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|PASS|${title}|${evidence}")
else
# optional monitor step should not block release pipeline
log "[WARN] ${step_id} rc=${rc} (non-blocking)"
STEP_RESULTS+=("${step_id}|WARN|${title}|${evidence}")
fi
}
run_step \
"STEP-01" \
"Superpowers stage validation (PHASE-01~10)" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/superpowers_stage_validate.sh\""
run_step \
"STEP-02" \
"TOK-007 release recheck" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/tok007_release_recheck.sh\""
run_step \
"STEP-03" \
"Final decision consistency check" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/final_decision_consistency_check.sh\""
run_step \
"STEP-04" \
"Generate final decision candidate from TOK-007" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/tok007_generate_final_decision_candidate.sh\""
run_optional_step_non_blocking \
"STEP-05" \
"Optional Minimax upstream monitoring snapshot+trend" \
"${ENABLE_MINIMAX_MONITORING}" \
"cd \"${ROOT_DIR}\" && RUN_ACTIVE_SMOKE=\"${MINIMAX_RUN_ACTIVE_SMOKE}\" bash \"scripts/ci/minimax_upstream_daily_snapshot.sh\" \"${TODAY_TAG}\" \"${MINIMAX_ENV_FILE}\" && bash \"scripts/ci/minimax_upstream_trend_report.sh\" \"${TODAY_TAG}\""
# Only FAIL rows block the pipeline; SKIP and WARN do not.
has_fail=0
for row in "${STEP_RESULTS[@]}"; do
status="$(echo "${row}" | awk -F'|' '{print $2}')"
if [[ "${status}" == "FAIL" ]]; then
has_fail=1
fi
done
PIPELINE_RESULT="PASS"
PIPELINE_NOTE="all steps finished"
if [[ "${has_fail}" -eq 1 ]]; then
PIPELINE_RESULT="FAIL"
PIPELINE_NOTE="at least one step failed"
fi
{
echo "# Superpowers 发布流水执行报告"
echo
echo "- 时间戳:${TS}"
echo "- 执行脚本:\`scripts/ci/superpowers_release_pipeline.sh\`"
echo "- 结果:**${PIPELINE_RESULT}**"
echo "- 说明:${PIPELINE_NOTE}"
echo "- Minimax 监控步开关:\`${ENABLE_MINIMAX_MONITORING}\`(非阻断)"
echo "- Minimax 监控环境:\`${MINIMAX_ENV_FILE}\`"
echo "- Minimax 实时探测:\`${MINIMAX_RUN_ACTIVE_SMOKE}\`"
echo
echo "## 步骤结果"
echo
echo "| 步骤 | 结果 | 说明 | 证据 |"
echo "|---|---|---|---|"
for row in "${STEP_RESULTS[@]}"; do
step_id="$(echo "${row}" | awk -F'|' '{print $1}')"
status="$(echo "${row}" | awk -F'|' '{print $2}')"
title="$(echo "${row}" | awk -F'|' '{print $3}')"
evidence="$(echo "${row}" | awk -F'|' '{print $4}')"
echo "| ${step_id} | ${status} | ${title} | ${evidence} |"
done
} > "${REPORT_FILE}"
log "[INFO] pipeline report generated: ${REPORT_FILE}"
log "[RESULT] ${PIPELINE_RESULT}"
if [[ "${PIPELINE_RESULT}" == "FAIL" ]]; then
exit 1
fi

View File

@@ -0,0 +1,253 @@
#!/usr/bin/env bash
# superpowers_stage_validate.sh — execute validation phases PHASE-01..10
# (Go tests, SUP mock run, TOK dry-runs, dependency audit, rollback drill,
# staging precheck, metrics snapshots, token readiness) and derive a
# GO / CONDITIONAL_GO / NO_GO decision with per-phase evidence files.
# Env overrides: DEP_AUDIT_DATE, STAGE_DRILL_DATE, STAGING_ENV_FILE.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
TS="$(date +%F_%H%M%S)"
OUT_DIR="${ROOT_DIR}/reports/gates"
ART_DIR="${ROOT_DIR}/tests/supply/artifacts/superpowers_stage_validation_${TS}"
REPORT_FILE="${OUT_DIR}/superpowers_stage_validation_${TS}.md"
LOG_FILE="${OUT_DIR}/superpowers_stage_validation_${TS}.log"
GO_BIN="${ROOT_DIR}/.tools/go-current/bin/go"
DEP_AUDIT_DATE="${DEP_AUDIT_DATE:-2026-03-27}"
STAGE_DRILL_DATE="${STAGE_DRILL_DATE:-$(date +%F)}"
STAGING_ENV_FILE="${STAGING_ENV_FILE:-scripts/supply-gate/.env}"
mkdir -p "${OUT_DIR}" "${ART_DIR}"
: > "${LOG_FILE}"
# Echo to stdout and append to the run log.
log() {
echo "$1" | tee -a "${LOG_FILE}"
}
# Return 0 when the staging env file is local/mock: path mentions
# "local-mock" or its API_BASE_URL points at loopback.
is_mock_staging_env() {
local env_path="$1"
if [[ -z "${env_path}" ]]; then
return 1
fi
if [[ "${env_path}" != /* ]]; then
env_path="${ROOT_DIR}/${env_path}"
fi
if [[ ! -f "${env_path}" ]]; then
return 1
fi
if echo "${env_path}" | grep -Eiq 'local-mock'; then
return 0
fi
local api_base
api_base="$(grep -E '^API_BASE_URL=' "${env_path}" | head -n 1 | cut -d'=' -f2- | tr -d '\"' || true)"
if echo "${api_base}" | grep -Eiq '127\.0\.0\.1|localhost'; then
return 0
fi
return 1
}
STEP_RESULTS=()
# run_step STEP_ID TITLE CMD OUT_FILE — run one phase with set -e relaxed,
# capture its output, and record "id|PASS/FAIL|title|evidence".
run_step() {
local step_id="$1"
local title="$2"
local cmd="$3"
local out_file="$4"
log "[INFO] ${step_id} ${title} start"
set +e
bash -lc "${cmd}" > "${out_file}" 2>&1
local rc=$?
set -e
if [[ ${rc} -eq 0 ]]; then
log "[PASS] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|PASS|${title}|${out_file}")
else
log "[FAIL] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|FAIL|${title}|${out_file}")
fi
}
# Like run_step, but a non-zero exit whose output matches deferred_pattern is
# recorded as DEFERRED (used for PHASE-07 when only placeholder secrets exist).
run_step_allow_deferred() {
local step_id="$1"
local title="$2"
local cmd="$3"
local out_file="$4"
local deferred_pattern="$5"
log "[INFO] ${step_id} ${title} start"
set +e
bash -lc "${cmd}" > "${out_file}" 2>&1
local rc=$?
set -e
if [[ ${rc} -eq 0 ]]; then
log "[PASS] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|PASS|${title}|${out_file}")
return
fi
if grep -Eiq "${deferred_pattern}" "${out_file}"; then
log "[DEFERRED] ${step_id} rc=${rc} matched expected pattern"
STEP_RESULTS+=("${step_id}|DEFERRED|${title}|${out_file}")
return
fi
log "[FAIL] ${step_id} rc=${rc}"
STEP_RESULTS+=("${step_id}|FAIL|${title}|${out_file}")
}
# Start the local mock gateway on 127.0.0.1:18080 unless already healthy.
# Prints "already_running", the new PID, or "failed" after ~4s of polling.
ensure_mock_server() {
if curl -sS -m 2 "http://127.0.0.1:18080/actuator/health" >/dev/null 2>&1; then
echo "already_running"
return
fi
nohup python3 "${ROOT_DIR}/scripts/mock/supply_gateway_mock_server.py" > "${ART_DIR}/mock_server.log" 2>&1 &
local pid=$!
for _ in {1..20}; do
if curl -sS -m 2 "http://127.0.0.1:18080/actuator/health" >/dev/null 2>&1; then
echo "${pid}"
return
fi
sleep 0.2
done
echo "failed"
}
MOCK_PID="$(ensure_mock_server)"
if [[ "${MOCK_PID}" == "failed" ]]; then
log "[FAIL] cannot start mock server on 127.0.0.1:18080"
exit 1
fi
# Only kill the server on exit if this run started it.
if [[ "${MOCK_PID}" != "already_running" ]]; then
log "[INFO] mock server started with pid=${MOCK_PID}"
trap 'kill "${MOCK_PID}" >/dev/null 2>&1 || true' EXIT
else
log "[INFO] mock server already running"
fi
# Prefer the vendored Go toolchain, falling back to one on PATH.
if [[ ! -x "${GO_BIN}" ]]; then
GO_BIN="$(command -v go || true)"
fi
if [[ -z "${GO_BIN}" ]]; then
log "[FAIL] go binary not found"
exit 1
fi
run_step \
"PHASE-01" \
"TOK runtime code tests" \
"cd \"${ROOT_DIR}/platform-token-runtime\" && export PATH=\"$(dirname "${GO_BIN}"):\$PATH\" && export GOCACHE=\"${ROOT_DIR}/.tools/go-cache\" && export GOPATH=\"${ROOT_DIR}/.tools/go\" && \"${GO_BIN}\" test ./..." \
"${ART_DIR}/phase01_go_test.log"
run_step \
"PHASE-02" \
"SUP local-mock run_all execution" \
"cd \"${ROOT_DIR}\" && bash \"scripts/supply-gate/run_all.sh\" \"scripts/supply-gate/.env.local-mock\"" \
"${ART_DIR}/phase02_sup_run_all_mock.log"
run_step \
"PHASE-03" \
"TOK-005 boundary dry-run on local-mock env" \
"cd \"${ROOT_DIR}\" && bash \"scripts/supply-gate/tok005_boundary_dryrun.sh\" \"scripts/supply-gate/.env.local-mock\"" \
"${ART_DIR}/phase03_tok005_dryrun_mock.log"
run_step \
"PHASE-04" \
"TOK-006 gate bundle aggregation" \
"cd \"${ROOT_DIR}\" && ENABLE_SUP_RUN=0 ENABLE_TOK005_DRYRUN=1 bash \"scripts/supply-gate/tok006_gate_bundle.sh\" \"scripts/supply-gate/.env.local-mock\"" \
"${ART_DIR}/phase04_tok006_bundle.log"
run_step \
"PHASE-05" \
"Dependency audit gate validation" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/dependency-audit-check.sh\" \"${DEP_AUDIT_DATE}\"" \
"${ART_DIR}/phase05_dependency_audit.log"
run_step \
"PHASE-06" \
"Stage gate rollback drill" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/stage-gate-drill.sh\" \"G3\" \"${STAGE_DRILL_DATE}\"" \
"${ART_DIR}/phase06_stage_gate_drill.log"
run_step_allow_deferred \
"PHASE-07" \
"Real staging precheck (expected deferred before real secrets)" \
"cd \"${ROOT_DIR}\" && bash \"scripts/supply-gate/staging_precheck_and_run.sh\" \"${STAGING_ENV_FILE}\"" \
"${ART_DIR}/phase07_staging_precheck.log" \
"placeholder token detected|placeholder API_BASE_URL|missing env var"
run_step \
"PHASE-08" \
"Daily metrics snapshot for M-017/M-018/M-019" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/metrics_daily_snapshot.sh\" \"$(date +%F)\"" \
"${ART_DIR}/phase08_metrics_snapshot.log"
run_step \
"PHASE-09" \
"7-day metrics trend report generation" \
"cd \"${ROOT_DIR}\" && bash \"scripts/ci/metrics_trend_report.sh\" \"$(date +%F)\"" \
"${ART_DIR}/phase09_metrics_trend.log"
run_step \
"PHASE-10" \
"Token runtime readiness check (M-021)" \
"cd \"${ROOT_DIR}\" && ENABLE_TOKEN_RUNTIME_SMOKE=1 bash \"scripts/ci/token_runtime_readiness_check.sh\" \"$(date +%F)\"" \
"${ART_DIR}/phase10_token_runtime_readiness.log"
# Decision: any FAIL -> NO_GO; else any DEFERRED -> CONDITIONAL_GO; a clean
# GO is further downgraded when PHASE-07 used a local/mock staging env.
has_fail=0
has_deferred=0
for row in "${STEP_RESULTS[@]}"; do
status="$(echo "${row}" | awk -F'|' '{print $2}')"
if [[ "${status}" == "FAIL" ]]; then
has_fail=1
fi
if [[ "${status}" == "DEFERRED" ]]; then
has_deferred=1
fi
done
DECISION="GO"
DECISION_REASON="all phases passed"
if [[ "${has_fail}" -eq 1 ]]; then
DECISION="NO_GO"
DECISION_REASON="at least one phase failed"
elif [[ "${has_deferred}" -eq 1 ]]; then
DECISION="CONDITIONAL_GO"
DECISION_REASON="all executable phases passed but real staging phase is deferred"
fi
if is_mock_staging_env "${STAGING_ENV_FILE}" && [[ "${DECISION}" == "GO" ]]; then
DECISION="CONDITIONAL_GO"
DECISION_REASON="all phases passed but PHASE-07 used local/mock staging env"
fi
{
echo "# Superpowers 阶段验证报告"
echo
echo "- 时间戳:${TS}"
echo "- 执行脚本:\`scripts/ci/superpowers_stage_validate.sh\`"
echo "- 决策:**${DECISION}**"
echo "- 决策依据:${DECISION_REASON}"
echo
echo "## 阶段结果"
echo
echo "| 阶段 | 结果 | 说明 | 证据 |"
echo "|---|---|---|---|"
for row in "${STEP_RESULTS[@]}"; do
step_id="$(echo "${row}" | awk -F'|' '{print $1}')"
status="$(echo "${row}" | awk -F'|' '{print $2}')"
title="$(echo "${row}" | awk -F'|' '{print $3}')"
evidence="$(echo "${row}" | awk -F'|' '{print $4}')"
echo "| ${step_id} | ${status} | ${title} | ${evidence} |"
done
echo
echo "## 说明"
echo
echo "1. PHASE-07 为真实 staging 验证阶段,在占位凭证场景下允许 DEFERRED不得伪造 PASS。"
echo "2. PHASE-08/09 负责 M-017/M-018/M-019 的每日快照与趋势证据生成。"
echo "3. PHASE-10 负责 M-021 token 运行态就绪度计算。"
echo "4. 其余阶段均为可执行验证,必须以命令返回码与证据文件为准。"
} > "${REPORT_FILE}"
log "[INFO] report generated: ${REPORT_FILE}"
log "[RESULT] ${DECISION}"
if [[ "${DECISION}" == "NO_GO" ]]; then
exit 1
fi

View File

@@ -0,0 +1,74 @@
#!/usr/bin/env bash
#
# Build a candidate final-decision document from the latest TOK-007 release
# recheck report: copy the signed decision template, clear every decision
# checkbox, tick the one matching the machine verdict, and append an appendix
# recording the recheck provenance. The candidate is for human review only.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
TS="$(date +%F_%H%M%S)"
OUT_DIR="${ROOT_DIR}/review/outputs"
GATE_DIR="${ROOT_DIR}/reports/gates"
# Fix: LOG_FILE lives under reports/gates, which this script never created
# (only review/outputs was mkdir'd); on a fresh tree every tee below failed.
mkdir -p "${OUT_DIR}" "${GATE_DIR}"
SOURCE_FILE="${ROOT_DIR}/review/final_decision_2026-03-31.md"
# Intentionally unquoted so the glob expands; newest recheck report wins.
# shellcheck disable=SC2086
TOK007_FILE="$(ls -1t ${ROOT_DIR}/review/outputs/tok007_release_recheck_*.md 2>/dev/null | head -n 1 || true)"
OUT_FILE="${OUT_DIR}/final_decision_candidate_from_tok007_${TS}.md"
LOG_FILE="${GATE_DIR}/tok007_generate_candidate_${TS}.log"
if [[ ! -f "${SOURCE_FILE}" ]]; then
  echo "[FAIL] source final decision missing: ${SOURCE_FILE}" | tee "${LOG_FILE}"
  exit 1
fi
if [[ -z "${TOK007_FILE}" || ! -f "${TOK007_FILE}" ]]; then
  echo "[FAIL] tok007 recheck report missing" | tee "${LOG_FILE}"
  exit 1
fi
# Parse the machine verdict; the ** delimiters make each pattern exact, so
# CONDITIONAL_GO cannot be shadowed by the bare GO pattern.
DECISION="UNKNOWN"
if grep -q '机判结论:\*\*CONDITIONAL_GO\*\*' "${TOK007_FILE}"; then
  DECISION="CONDITIONAL_GO"
elif grep -q '机判结论:\*\*NO_GO\*\*' "${TOK007_FILE}"; then
  DECISION="NO_GO"
elif grep -q '机判结论:\*\*GO\*\*' "${TOK007_FILE}"; then
  DECISION="GO"
fi
if [[ "${DECISION}" == "UNKNOWN" ]]; then
  echo "[FAIL] cannot parse decision from ${TOK007_FILE}" | tee "${LOG_FILE}"
  exit 1
fi
cp "${SOURCE_FILE}" "${OUT_FILE}"
# Reset all decision checkboxes (English and Chinese variants).
sed -i 's/^- \[x\] GO/- [ ] GO/g' "${OUT_FILE}"
sed -i 's/^- \[x\] CONDITIONAL GO/- [ ] CONDITIONAL GO/g' "${OUT_FILE}"
sed -i 's/^- \[x\] NO-GO/- [ ] NO-GO/g' "${OUT_FILE}"
sed -i 's/^- \[x\] 通过/- [ ] 通过/g' "${OUT_FILE}"
sed -i 's/^- \[x\] 有条件通过/- [ ] 有条件通过/g' "${OUT_FILE}"
sed -i 's/^- \[x\] 不通过/- [ ] 不通过/g' "${OUT_FILE}"
# Tick only the first matching checkbox; GNU sed's 0,/re/ address range
# restricts the substitution to the first occurrence.
case "${DECISION}" in
  GO)
    sed -i '0,/^- \[ \] GO/s//- [x] GO/' "${OUT_FILE}"
    ;;
  CONDITIONAL_GO)
    sed -i '0,/^- \[ \] CONDITIONAL GO/s//- [x] CONDITIONAL GO/' "${OUT_FILE}"
    ;;
  NO_GO)
    sed -i '0,/^- \[ \] NO-GO/s//- [x] NO-GO/' "${OUT_FILE}"
    ;;
esac
# Append the provenance appendix (report body text is a contract; keep as-is).
{
  echo
  echo "## 附录TOK-007 自动复审回填(${TS}"
  echo
  echo "1. 自动复审来源:\`${TOK007_FILE}\`"
  echo "2. 自动复审结论:\`${DECISION}\`"
  echo "3. 说明:该候选稿用于人工审阅与签署准备,不直接替代正式签署版本。"
} >> "${OUT_FILE}"
{
  echo "[INFO] source=${SOURCE_FILE}"
  echo "[INFO] tok007=${TOK007_FILE}"
  echo "[RESULT] decision=${DECISION}"
  echo "[INFO] output=${OUT_FILE}"
} | tee "${LOG_FILE}"

View File

@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# tok007_release_recheck.sh — aggregate the latest upstream gate reports into
# a single TOK-007 release recheck verdict (GO / CONDITIONAL_GO / NO_GO).
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
TS="$(date +%F_%H%M%S)"
OUT_DIR="${ROOT_DIR}/review/outputs"
GATE_DIR="${ROOT_DIR}/reports/gates"
# Fix: LOG_FILE is under reports/gates, which was never created here (only
# review/outputs); on a fresh checkout the first tee -a would fail.
mkdir -p "${OUT_DIR}" "${GATE_DIR}"
OUT_FILE="${OUT_DIR}/tok007_release_recheck_${TS}.md"
LOG_FILE="${GATE_DIR}/tok007_release_recheck_${TS}.log"
log() {
  # Append one line to the run log while echoing it to stdout.
  local message="$1"
  tee -a "${LOG_FILE}" <<< "${message}"
}
latest_file_or_empty() {
  # Print the most recently modified file matching a glob pattern, or an
  # empty string when nothing matches.
  local glob_pattern="$1"
  local newest
  # shellcheck disable=SC2086 -- the pattern must undergo glob expansion
  newest="$(ls -1t ${glob_pattern} 2>/dev/null | head -n 1 || true)"
  printf '%s\n' "${newest}"
}
extract_md_checkbox_conclusion() {
  # Read which decision checkbox is ticked in a markdown report and map it to
  # GO / CONDITIONAL_GO / NO_GO. When several are ticked, GO wins over
  # CONDITIONAL_GO over NO_GO (same precedence as the original flag logic).
  local report="$1"
  if [[ ! -f "${report}" ]]; then
    echo "UNKNOWN"
    return
  fi
  if grep -Eq '^- \[x\] (通过|GO)' "${report}"; then
    echo "GO"
  elif grep -Eq '^- \[x\] (有条件通过|CONDITIONAL GO)' "${report}"; then
    echo "CONDITIONAL_GO"
  elif grep -Eq '^- \[x\] (不通过|NO-GO)' "${report}"; then
    echo "NO_GO"
  else
    echo "UNKNOWN"
  fi
}
extract_bold_decision() {
  # Parse the bold decision line ("- 决策:**X**" or "- 判定:**X**") from a
  # report and print NO_GO / CONDITIONAL_GO / GO / UNKNOWN. NO_GO is probed
  # first, then CONDITIONAL_GO, then GO, because the shorter names are
  # substrings of the longer ones.
  local file="$1"
  if [[ ! -f "${file}" ]]; then
    echo "UNKNOWN"
    return
  fi
  local row
  # Fix: reports emit a fullwidth colon between the label and the bold value
  # (e.g. "- 决策:**GO**"); the previous pattern '^- (决策|判定)\*\*' omitted
  # it and therefore never matched, forcing every decision to UNKNOWN. The
  # colon is optional to stay compatible with colon-less variants.
  row="$(grep -E '^- (决策|判定)(:|:)?\*\*' "${file}" | head -n 1 || true)"
  if [[ -z "${row}" ]]; then
    echo "UNKNOWN"
    return
  fi
  if echo "${row}" | grep -q 'NO_GO'; then
    echo "NO_GO"
    return
  fi
  if echo "${row}" | grep -q 'CONDITIONAL_GO'; then
    echo "CONDITIONAL_GO"
    return
  fi
  if echo "${row}" | grep -q 'GO'; then
    echo "GO"
    return
  fi
  echo "UNKNOWN"
}
extract_superpowers_decision() {
  # Superpowers stage reports use the same bold decision line format, so
  # simply delegate to the generic parser.
  extract_bold_decision "$1"
}
extract_pass_fail_result() {
  # Map a "- 结果:**PASS**" / "- 结果:**FAIL**" report line to PASS / FAIL;
  # anything else (including a missing file) yields UNKNOWN.
  local report="$1"
  if [[ ! -f "${report}" ]]; then
    echo "UNKNOWN"
    return
  fi
  if grep -Eq '^- 结果:\*\*PASS\*\*' "${report}"; then
    echo "PASS"
  elif grep -Eq '^- 结果:\*\*FAIL\*\*' "${report}"; then
    echo "FAIL"
  else
    echo "UNKNOWN"
  fi
}
# Collect the newest instance of each upstream evidence report.
TOK006_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/tok006_gate_bundle_*.md")"
SP_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/superpowers_stage_validation_*.md")"
TOK_RUNTIME_READINESS_REPORT="$(latest_file_or_empty "${ROOT_DIR}/reports/gates/token_runtime_readiness_*.md")"
SUP_REVIEW_REPORT="${ROOT_DIR}/reports/supply_gate_review_2026-03-31.md"
FINAL_DECISION_REPORT="${ROOT_DIR}/review/final_decision_2026-03-31.md"
TOK006_DECISION="$(extract_bold_decision "${TOK006_REPORT}")"
SP_DECISION="$(extract_superpowers_decision "${SP_REPORT}")"
TOK_RUNTIME_READINESS_RESULT="$(extract_pass_fail_result "${TOK_RUNTIME_READINESS_REPORT}")"
SUP_DECISION="$(extract_md_checkbox_conclusion "${SUP_REVIEW_REPORT}")"
FINAL_DECISION_CURRENT="$(extract_md_checkbox_conclusion "${FINAL_DECISION_REPORT}")"
# FINAL_DECISION_CURRENT is informational only; it does not feed the verdict.
has_unknown=0
if [[ "${TOK006_DECISION}" == "UNKNOWN" || "${SP_DECISION}" == "UNKNOWN" || "${TOK_RUNTIME_READINESS_RESULT}" == "UNKNOWN" || "${SUP_DECISION}" == "UNKNOWN" ]]; then
  has_unknown=1
fi
# Verdict ladder (order matters): any NO_GO/FAIL => NO_GO; all GO/PASS => GO;
# any missing/unparseable source => NO_GO; otherwise CONDITIONAL_GO (the
# default covers mixed conditional/deferred upstream states).
DECISION="CONDITIONAL_GO"
DECISION_REASON="all available checks are non-failing but at least one source is conditional/mock/deferred"
if [[ "${TOK006_DECISION}" == "NO_GO" || "${SP_DECISION}" == "NO_GO" || "${TOK_RUNTIME_READINESS_RESULT}" == "FAIL" || "${SUP_DECISION}" == "NO_GO" ]]; then
  DECISION="NO_GO"
  DECISION_REASON="at least one upstream gate is NO_GO"
elif [[ "${TOK006_DECISION}" == "GO" && "${SP_DECISION}" == "GO" && "${TOK_RUNTIME_READINESS_RESULT}" == "PASS" && "${SUP_DECISION}" == "GO" ]]; then
  DECISION="GO"
  DECISION_REASON="all upstream gates report GO"
elif [[ "${has_unknown}" -eq 1 ]]; then
  DECISION="NO_GO"
  DECISION_REASON="missing/unknown upstream decision source"
fi
RECOMMEND_ACTION_1="补齐真实 staging 参数后执行 scripts/supply-gate/staging_precheck_and_run.sh"
RECOMMEND_ACTION_2="重跑 scripts/ci/superpowers_stage_validate.sh 并确认 PHASE-07=PASS"
RECOMMEND_ACTION_3="更新 reports/supply_gate_review_2026-03-31.md 与 review/final_decision_2026-03-31.md 签署页"
# Render the recheck report; checkbox ticking happens afterwards via sed.
cat > "${OUT_FILE}" <<EOF
# TOK-007 发布门禁复审报告
- 时间戳:${TS}
- 生成脚本:\`scripts/ci/tok007_release_recheck.sh\`
## 1. 输入证据
| 来源 | 路径 | 判定 |
|---|---|---|
| TOK-006 Gate 汇总 | ${TOK006_REPORT:-N/A} | ${TOK006_DECISION} |
| Superpowers 阶段验证 | ${SP_REPORT:-N/A} | ${SP_DECISION} |
| Token Runtime Readiness (M-021) | ${TOK_RUNTIME_READINESS_REPORT:-N/A} | ${TOK_RUNTIME_READINESS_RESULT} |
| SUP Gate 汇总评审 | ${SUP_REVIEW_REPORT} | ${SUP_DECISION} |
| 当前最终决议文档 | ${FINAL_DECISION_REPORT} | ${FINAL_DECISION_CURRENT} |
## 2. 复审结论
- [ ] GO
- [ ] CONDITIONAL GO
- [ ] NO-GO
- 机判结论:**${DECISION}**
- 结论依据:${DECISION_REASON}
## 3. 状态建议
1. ${RECOMMEND_ACTION_1}
2. ${RECOMMEND_ACTION_2}
3. ${RECOMMEND_ACTION_3}
EOF
# Tick the checkbox matching the machine verdict (first occurrence only).
case "${DECISION}" in
  GO)
    sed -i 's/^- \[ \] GO/- [x] GO/' "${OUT_FILE}"
    ;;
  CONDITIONAL_GO)
    sed -i 's/^- \[ \] CONDITIONAL GO/- [x] CONDITIONAL GO/' "${OUT_FILE}"
    ;;
  NO_GO)
    sed -i 's/^- \[ \] NO-GO/- [x] NO-GO/' "${OUT_FILE}"
    ;;
esac
log "[INFO] TOK006=${TOK006_DECISION}, SP=${SP_DECISION}, M021=${TOK_RUNTIME_READINESS_RESULT}, SUP=${SUP_DECISION}, FINAL_CURRENT=${FINAL_DECISION_CURRENT}"
log "[RESULT] ${DECISION}"
log "[INFO] output=${OUT_FILE}"
# NO_GO is the only failing exit; CONDITIONAL_GO exits 0 for CI chaining.
if [[ "${DECISION}" == "NO_GO" ]]; then
  exit 1
fi

View File

@@ -0,0 +1,213 @@
#!/usr/bin/env bash
# token_runtime_readiness_check.sh — compute the M-021 readiness percentage
# for the platform token runtime from static artifact checks, go test/build,
# and an optional local smoke run. Usage: token_runtime_readiness_check.sh [DATE].
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)"
DATE_TAG="${1:-$(date +%F)}"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
TS_TIME="$(date +%H%M%S)"
REPORT_FILE="${OUT_DIR}/token_runtime_readiness_${DATE_TAG}_${TS_TIME}.md"
LOG_FILE="${OUT_DIR}/token_runtime_readiness_${DATE_TAG}_${TS_TIME}.log"
# Prefer the project-pinned Go toolchain; fall back to whatever is on PATH.
GO_BIN="${ROOT_DIR}/.tools/go-current/bin/go"
if [[ ! -x "${GO_BIN}" ]]; then
  GO_BIN="$(command -v go || true)"
fi
if [[ -z "${GO_BIN}" ]]; then
  echo "[FAIL] go binary not found" | tee -a "${LOG_FILE}"
  exit 1
fi
# Parallel arrays holding one readiness-check row each (id, status, text, path).
CHECK_IDS=()
CHECK_STATUS=()
CHECK_DESC=()
CHECK_EVIDENCE=()
add_check() {
  # Record one readiness check: id, PASS/FAIL status, description, evidence.
  local check_id="$1" check_status="$2" check_desc="$3" check_evidence="$4"
  CHECK_IDS+=("${check_id}")
  CHECK_STATUS+=("${check_status}")
  CHECK_DESC+=("${check_desc}")
  CHECK_EVIDENCE+=("${check_evidence}")
}
check_file() {
  # Record PASS when the artifact file exists, otherwise FAIL with a
  # "(missing)" marker appended to the evidence column.
  local check_id="$1" check_desc="$2" artifact="$3"
  if [[ -f "${artifact}" ]]; then
    add_check "${check_id}" "PASS" "${check_desc}" "${artifact}"
  else
    add_check "${check_id}" "FAIL" "${check_desc}" "${artifact} (missing)"
  fi
}
check_pattern() {
  # Record PASS when the file exists AND contains the ERE pattern; FAIL
  # (with a "(pattern missing)" marker) otherwise.
  local check_id="$1" check_desc="$2" artifact="$3" regex="$4"
  if [[ -f "${artifact}" ]] && grep -Eq "${regex}" "${artifact}"; then
    add_check "${check_id}" "PASS" "${check_desc}" "${artifact}"
  else
    add_check "${check_id}" "FAIL" "${check_desc}" "${artifact} (pattern missing)"
  fi
}
check_file "TOK-REAL-001-C1" "Token API 可执行入口存在" "${ROOT_DIR}/platform-token-runtime/cmd/platform-token-runtime/main.go"
check_file "TOK-REAL-001-C2" "Token HTTP 契约处理实现存在" "${ROOT_DIR}/platform-token-runtime/internal/httpapi/token_api.go"
check_file "TOK-REAL-001-C3" "Token 生命周期运行时实现存在" "${ROOT_DIR}/platform-token-runtime/internal/auth/service/inmemory_runtime.go"
check_file "TOK-REAL-001-C4" "TOK 生命周期可执行测试存在" "${ROOT_DIR}/platform-token-runtime/internal/token/lifecycle_executable_test.go"
check_file "TOK-REAL-001-C5" "TOK 审计可执行测试存在" "${ROOT_DIR}/platform-token-runtime/internal/token/audit_executable_test.go"
check_file "TOK-REAL-003-C1" "可部署镜像构建工件存在" "${ROOT_DIR}/platform-token-runtime/Dockerfile"
check_file "TOK-REAL-003-C2" "平台 token OpenAPI 契约存在" "${ROOT_DIR}/docs/platform_token_api_contract_openapi_draft_v1_2026-03-29.yaml"
check_pattern "TOK-REAL-002-C1" "审计事件查询接口已落地OpenAPI" "${ROOT_DIR}/docs/platform_token_api_contract_openapi_draft_v1_2026-03-29.yaml" "/api/v1/platform/tokens/audit-events:"
check_pattern "TOK-REAL-002-C2" "审计事件查询接口已落地(代码)" "${ROOT_DIR}/platform-token-runtime/internal/httpapi/token_api.go" "handleAuditEvents"
check_file "TOK-REAL-003-C3" "token runtime 持久化表结构工件存在" "${ROOT_DIR}/sql/postgresql/token_runtime_schema_v1.sql"
GO_TEST_LOG="${OUT_DIR}/token_runtime_go_test_${DATE_TAG}_${TS_TIME}.log"
if (cd "${ROOT_DIR}/platform-token-runtime" && export PATH="$(dirname "${GO_BIN}"):$PATH" && export GOCACHE="${ROOT_DIR}/.tools/go-cache" && export GOPATH="${ROOT_DIR}/.tools/go" && "${GO_BIN}" test ./... >"${GO_TEST_LOG}" 2>&1); then
add_check "TOK-REAL-001-C6" "PASS" "Token runtime 测试通过" "${GO_TEST_LOG}"
else
add_check "TOK-REAL-001-C6" "FAIL" "Token runtime 测试通过" "${GO_TEST_LOG}"
fi
GO_BUILD_LOG="${OUT_DIR}/token_runtime_go_build_${DATE_TAG}_${TS_TIME}.log"
BIN_PATH="${OUT_DIR}/token_runtime_bin_${DATE_TAG}_${TS_TIME}"
if (cd "${ROOT_DIR}/platform-token-runtime" && export PATH="$(dirname "${GO_BIN}"):$PATH" && export GOCACHE="${ROOT_DIR}/.tools/go-cache" && export GOPATH="${ROOT_DIR}/.tools/go" && "${GO_BIN}" build -o "${BIN_PATH}" ./cmd/platform-token-runtime >"${GO_BUILD_LOG}" 2>&1); then
add_check "TOK-REAL-001-C7" "PASS" "Token runtime 可构建" "${GO_BUILD_LOG}"
else
add_check "TOK-REAL-001-C7" "FAIL" "Token runtime 可构建" "${GO_BUILD_LOG}"
fi
SMOKE_LOG="${OUT_DIR}/token_runtime_smoke_${DATE_TAG}_${TS_TIME}.log"
is_port_in_use() {
  # True when any listening TCP socket's local-address column ends in the
  # given port (matches both "addr:port" and "addr.port" formats).
  local candidate_port="$1"
  ss -ltn | awk '{print $4}' | grep -Eq "[:.]${candidate_port}$"
}
pick_smoke_port() {
  # Print the first free TCP port in [base, base + max_tries); return 1 when
  # the whole range is occupied.
  local base_port="${1:-18082}"
  local max_attempts="${2:-50}"
  local candidate
  for (( candidate = base_port; candidate < base_port + max_attempts; candidate++ )); do
    if ! is_port_in_use "${candidate}"; then
      echo "${candidate}"
      return 0
    fi
  done
  return 1
}
# Local smoke run (TOK-REAL-001-C8): boot the built binary on a free port,
# poll health, issue a token, then query its audit trail.
SMOKE_PORT_BASE="${TOKEN_RUNTIME_SMOKE_PORT:-18082}"
# NOTE(review): the port scan runs even when the smoke itself is disabled; if
# it fails here AND ENABLE_TOKEN_RUNTIME_SMOKE!=1, C8 is recorded twice (FAIL
# then PASS), inflating TOTAL and forcing an overall FAIL — confirm intended.
if ! SMOKE_PORT="$(pick_smoke_port "${SMOKE_PORT_BASE}" "50")"; then
  echo "[FAIL] no available smoke port from ${SMOKE_PORT_BASE} within 50 tries" > "${SMOKE_LOG}"
  add_check "TOK-REAL-001-C8" "FAIL" "Token runtime 本地可运行冒烟通过" "${SMOKE_LOG}"
  SMOKE_PORT=""
fi
if [[ "${ENABLE_TOKEN_RUNTIME_SMOKE:-0}" == "1" ]]; then
  if [[ -n "${SMOKE_PORT}" ]]; then
  # set +e so a failing smoke subshell is recorded as FAIL instead of
  # aborting the whole readiness run.
  set +e
  (
    echo "[INFO] start token runtime smoke on :${SMOKE_PORT}"
    TOKEN_RUNTIME_ADDR=":${SMOKE_PORT}" "${BIN_PATH}" >"${SMOKE_LOG}.server" 2>&1 &
    pid=$!
    # Ensure the background server is killed on every subshell exit path.
    trap 'kill "${pid}" >/dev/null 2>&1 || true' EXIT
    ready=0
    # Poll health for up to ~4s (20 tries x 0.2s).
    for _ in {1..20}; do
      if curl -sS -m 2 "http://127.0.0.1:${SMOKE_PORT}/actuator/health" | grep -q '\"UP\"'; then
        ready=1
        break
      fi
      sleep 0.2
    done
    if [[ "${ready}" -ne 1 ]]; then
      echo "[FAIL] health check failed"
      exit 1
    fi
    # Issue a short-lived token; 201 is the only accepted status.
    issue_code="$(curl -sS -m 3 -o "${SMOKE_LOG}.issue.json" -w "%{http_code}" \
      -X POST "http://127.0.0.1:${SMOKE_PORT}/api/v1/platform/tokens/issue" \
      -H "Content-Type: application/json" \
      -H "X-Request-Id: req-smoke-issue" \
      -H "Idempotency-Key: idem-smoke-issue" \
      -d '{"subject_id":"smoke-user","role":"owner","ttl_seconds":300,"scope":["supply:*"]}')"
    if [[ "${issue_code}" != "201" ]]; then
      echo "[FAIL] issue status=${issue_code}"
      exit 1
    fi
    # Query the audit trail for the request id used above.
    audit_code="$(curl -sS -m 3 -o "${SMOKE_LOG}.audit.json" -w "%{http_code}" \
      "http://127.0.0.1:${SMOKE_PORT}/api/v1/platform/tokens/audit-events?request_id=req-smoke-issue&limit=5" \
      -H "X-Request-Id: req-smoke-audit")"
    if [[ "${audit_code}" != "200" ]]; then
      echo "[FAIL] audit query status=${audit_code}"
      exit 1
    fi
    if ! grep -q '"event_name"' "${SMOKE_LOG}.audit.json"; then
      echo "[FAIL] audit query payload missing event_name"
      exit 1
    fi
    echo "[PASS] smoke passed"
  ) >"${SMOKE_LOG}" 2>&1
  smoke_rc=$?
  set -e
  if [[ "${smoke_rc}" -eq 0 ]]; then
    add_check "TOK-REAL-001-C8" "PASS" "Token runtime 本地可运行冒烟通过" "${SMOKE_LOG}"
  else
    add_check "TOK-REAL-001-C8" "FAIL" "Token runtime 本地可运行冒烟通过" "${SMOKE_LOG}"
  fi
  fi
else
  # Smoke disabled by default: record an explicit PASS with the opt-in hint.
  add_check "TOK-REAL-001-C8" "PASS" "Token runtime 本地可运行冒烟(默认跳过,可通过 ENABLE_TOKEN_RUNTIME_SMOKE=1 开启)" "N/A"
fi
# Tally PASS rows and compute the M-021 readiness percentage via awk
# (two-decimal fixed-point; 0.00 when no checks ran).
TOTAL="${#CHECK_IDS[@]}"
PASS_CNT=0
for status in "${CHECK_STATUS[@]}"; do
  if [[ "${status}" == "PASS" ]]; then
    PASS_CNT=$((PASS_CNT + 1))
  fi
done
READINESS_PCT="$(awk -v p="${PASS_CNT}" -v t="${TOTAL}" 'BEGIN{if(t==0){printf "0.00"}else{printf "%.2f", (p/t)*100}}')"
# Only a perfect score passes M-021.
RESULT="PASS"
if [[ "${READINESS_PCT}" != "100.00" ]]; then
  RESULT="FAIL"
fi
# Render the markdown readiness report (Chinese text is part of the report
# contract; keep as-is).
{
  echo "# Token Runtime Readiness Check (${DATE_TAG})"
  echo
  echo "- 时间戳:${DATE_TAG}_${TS_TIME}"
  echo "- 指标M-021 token_runtime_readiness_pct"
  echo "- 结果:**${RESULT}**"
  echo "- 数值:${READINESS_PCT}% (${PASS_CNT}/${TOTAL})"
  echo
  echo "| 检查项 | 结果 | 说明 | 证据 |"
  echo "|---|---|---|---|"
  for i in "${!CHECK_IDS[@]}"; do
    echo "| ${CHECK_IDS[$i]} | ${CHECK_STATUS[$i]} | ${CHECK_DESC[$i]} | ${CHECK_EVIDENCE[$i]} |"
  done
  echo
  echo "## 结论"
  echo
  echo "1. 本报告仅评估 token 运行态实现就绪度,不替代真实 staging 联调结论。"
  echo "2. 真实放行仍需结合 M-013~M-016、SUP-004~SUP-007 与 PHASE-07 实测。"
} > "${REPORT_FILE}"
{
  echo "[INFO] report=${REPORT_FILE}"
  echo "[INFO] readiness_pct=${READINESS_PCT}"
  echo "[RESULT] ${RESULT}"
} | tee -a "${LOG_FILE}"
if [[ "${RESULT}" != "PASS" ]]; then
  exit 1
fi

View File

@@ -0,0 +1,240 @@
#!/usr/bin/env python3
import json
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import parse_qs, urlparse
# In-memory state for the mock supply upstream: monotonically increasing id
# counters plus per-entity status maps keyed by stringified id. Not
# thread-safe; fine for the single-threaded HTTPServer below.
STATE = {
    "next_account_id": 1000,
    "next_package_id": 2000,
    "next_settlement_id": 3000,
    "accounts": {},
    "packages": {},
    "settlements": {},
}
def json_bytes(payload):
    """Serialize *payload* to ASCII-safe JSON and return it as UTF-8 bytes."""
    text = json.dumps(payload, ensure_ascii=True)
    return text.encode("utf-8")
class Handler(BaseHTTPRequestHandler):
    """Mock supply-gate upstream: routes supply/account/package/settlement
    endpoints to canned JSON responses backed by the module-level STATE."""

    server_version = "supply-mock/1.0"

    def _read_json(self):
        """Read and parse the request body as JSON; {} on empty/invalid body."""
        length = int(self.headers.get("Content-Length", "0"))
        if length <= 0:
            return {}
        body = self.rfile.read(length)
        try:
            return json.loads(body.decode("utf-8"))
        except Exception:
            # Malformed body is tolerated: callers see an empty payload.
            return {}

    def _write(self, status, payload):
        """Send *payload* as a JSON response with the given HTTP status."""
        data = json_bytes(payload)
        self.send_response(status)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(data)))
        self.end_headers()
        self.wfile.write(data)

    def _ok(self, payload):
        """Send a 200 response using the standard {code, message, data} envelope."""
        self._write(200, {"code": 0, "message": "ok", "data": payload})

    def do_GET(self):
        """Route read-only endpoints; unknown paths get a 404 envelope."""
        parsed = urlparse(self.path)
        path = parsed.path
        query = parse_qs(parsed.query)
        # /api/v1/supply/accounts/<id>/audit-logs — split("/")[5] is the id.
        if path.startswith("/api/v1/supply/accounts/") and path.endswith("/audit-logs"):
            account_id = path.split("/")[5]
            self._ok(
                {
                    "items": [
                        {
                            "request_id": f"req-audit-{account_id}",
                            "action": "state_change",
                            "result": "success",
                        }
                    ],
                    "page": 1,
                    "page_size": 20,
                    "total": 1,
                }
            )
            return
        if path == "/api/v1/supplier/billing":
            self._ok(
                {
                    "summary": {"total_amount": 123.45, "currency": "USD"},
                    "items": [],
                    "page": int(query.get("page", ["1"])[0]),
                    "page_size": int(query.get("page_size", ["20"])[0]),
                    "total": 0,
                }
            )
            return
        if path.startswith("/api/v1/supply/settlements/") and path.endswith("/statement"):
            settlement_id = path.split("/")[5]
            self._ok(
                {
                    "settlement_id": int(settlement_id),
                    "download_url": f"http://127.0.0.1:18080/mock/statement/{settlement_id}.csv",
                }
            )
            return
        if path == "/api/v1/supply/earnings/records":
            self._ok(
                {
                    "items": [
                        {
                            "record_id": 1,
                            "amount": 10,
                            "currency_code": "USD",
                            "status": "available",
                        }
                    ],
                    "page": int(query.get("page", ["1"])[0]),
                    "page_size": int(query.get("page_size", ["20"])[0]),
                    "total": 1,
                }
            )
            return
        if path == "/v1beta/models":
            # External query key should be rejected.
            self._write(
                403,
                {
                    "code": 403,
                    "message": "query key rejected",
                    "data": {"reason": "external_query_key_forbidden"},
                },
            )
            return
        if path == "/actuator/health":
            # Health endpoint uses the bare status shape, not the envelope.
            self._write(200, {"status": "UP"})
            return
        self._write(404, {"code": 404, "message": "not found", "data": None})

    def do_POST(self):
        """Route state-changing endpoints; mutations update STATE in place."""
        path = urlparse(self.path).path
        payload = self._read_json()
        if path == "/api/v1/supply/accounts/verify":
            self._ok(
                {
                    "verify_status": "pass",
                    "risk_level": "normal",
                    "provider": payload.get("provider", "openai"),
                }
            )
            return
        if path == "/api/v1/supply/accounts":
            # Allocate a new account id and start it in "pending".
            account_id = STATE["next_account_id"]
            STATE["next_account_id"] += 1
            STATE["accounts"][str(account_id)] = {"status": "pending"}
            self._ok({"account_id": account_id, "status": "pending"})
            return
        if path.startswith("/api/v1/supply/accounts/") and path.endswith("/activate"):
            account_id = path.split("/")[5]
            STATE["accounts"].setdefault(account_id, {})["status"] = "active"
            self._ok({"account_id": int(account_id), "status": "active"})
            return
        if path.startswith("/api/v1/supply/accounts/") and path.endswith("/suspend"):
            account_id = path.split("/")[5]
            STATE["accounts"].setdefault(account_id, {})["status"] = "suspended"
            self._ok({"account_id": int(account_id), "status": "suspended"})
            return
        if path == "/api/v1/supply/packages/draft":
            package_id = STATE["next_package_id"]
            STATE["next_package_id"] += 1
            STATE["packages"][str(package_id)] = {"status": "draft"}
            self._ok({"package_id": package_id, "status": "draft"})
            return
        if path.startswith("/api/v1/supply/packages/") and path.endswith("/publish"):
            package_id = path.split("/")[5]
            STATE["packages"].setdefault(package_id, {})["status"] = "active"
            self._ok({"package_id": int(package_id), "status": "active"})
            return
        if path.startswith("/api/v1/supply/packages/") and path.endswith("/pause"):
            package_id = path.split("/")[5]
            STATE["packages"].setdefault(package_id, {})["status"] = "paused"
            self._ok({"package_id": int(package_id), "status": "paused"})
            return
        if path.startswith("/api/v1/supply/packages/") and path.endswith("/unlist"):
            package_id = path.split("/")[5]
            STATE["packages"].setdefault(package_id, {})["status"] = "expired"
            self._ok({"package_id": int(package_id), "status": "expired"})
            return
        if path == "/api/v1/supply/packages/batch-price":
            # Batch price updates always "succeed" for every submitted item.
            items = payload.get("items", [])
            self._ok(
                {
                    "total": len(items),
                    "success_count": len(items),
                    "failed_count": 0,
                    "failed_items": [],
                }
            )
            return
        if path.startswith("/api/v1/supply/packages/") and path.endswith("/clone"):
            # Clone allocates a fresh draft package id (source id is ignored).
            package_id = STATE["next_package_id"]
            STATE["next_package_id"] += 1
            STATE["packages"][str(package_id)] = {"status": "draft"}
            self._ok({"package_id": package_id, "status": "draft"})
            return
        if path == "/api/v1/supply/settlements/withdraw":
            settlement_id = STATE["next_settlement_id"]
            STATE["next_settlement_id"] += 1
            STATE["settlements"][str(settlement_id)] = {"status": "pending"}
            self._ok({"settlement_id": settlement_id, "status": "pending"})
            return
        if path.startswith("/api/v1/supply/settlements/") and path.endswith("/cancel"):
            settlement_id = path.split("/")[5]
            STATE["settlements"].setdefault(settlement_id, {})["status"] = "cancelled"
            self._ok({"settlement_id": int(settlement_id), "status": "cancelled"})
            return
        if path == "/api/v1/chat/completions":
            self._ok(
                {
                    "id": "chatcmpl-mock-001",
                    "object": "chat.completion",
                    "choices": [
                        {
                            "index": 0,
                            "message": {"role": "assistant", "content": "pong"},
                            "finish_reason": "stop",
                        }
                    ],
                }
            )
            return
        self._write(404, {"code": 404, "message": "not found", "data": None})

    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
if __name__ == "__main__":
    # Bind to localhost only; serve_forever blocks until the process is killed.
    server = HTTPServer(("127.0.0.1", 18080), Handler)
    server.serve_forever()

View File

@@ -0,0 +1,207 @@
#!/usr/bin/env bash
# minimax_upstream_smoke.sh — probe the Minimax upstream: one unauthenticated
# base connectivity probe plus one authenticated "active" request, then write
# a markdown report classifying each probe. Usage: minimax_upstream_smoke.sh [ENV_FILE].
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
ENV_FILE="${1:-${SCRIPT_DIR}/.env.minimax-dev}"
OUT_DIR="${ROOT_DIR}/reports/gates"
ART_DIR_BASE="${ROOT_DIR}/tests/supply/artifacts"
TS="$(date +%F_%H%M%S)"
mkdir -p "${OUT_DIR}" "${ART_DIR_BASE}"
if [[ ! -f "${ENV_FILE}" ]]; then
  echo "[FAIL] missing env file: ${ENV_FILE}"
  exit 1
fi
# shellcheck disable=SC1090
source "${ENV_FILE}"
require_var() {
  # Abort the script unless the named environment variable is set and
  # non-empty (uses ${!name} indirect expansion).
  local var_name="$1"
  if [[ -z "${!var_name:-}" ]]; then
    echo "[FAIL] missing required env var: ${var_name}"
    exit 1
  fi
}
require_bin() {
  # Abort the script unless the named executable is resolvable on PATH.
  local bin_name="$1"
  command -v "${bin_name}" >/dev/null 2>&1 && return 0
  echo "[FAIL] missing required binary: ${bin_name}"
  exit 1
}
join_url() {
  # Join a base URL and a path with exactly one slash between them.
  local trimmed_base="${1%/}"
  local url_path="$2"
  case "${url_path}" in
    /*) ;;
    *) url_path="/${url_path}" ;;
  esac
  printf '%s%s\n' "${trimmed_base}" "${url_path}"
}
classify_http_code() {
  # Map an HTTP status code to a probe classification:
  #   2xx success -> PASS; 400/422/429 -> PASS_AUTH_REACHED (business layer
  #   reached); 401/403 -> FAIL_AUTH; 404/405 -> FAIL_PATH; 000 (curl could
  #   not connect) -> FAIL_NETWORK; anything else -> FAIL_OTHER.
  local http_code="$1"
  if [[ "${http_code}" =~ ^(200|201|202)$ ]]; then
    echo "PASS"
  elif [[ "${http_code}" =~ ^(400|422|429)$ ]]; then
    echo "PASS_AUTH_REACHED"
  elif [[ "${http_code}" == "401" || "${http_code}" == "403" ]]; then
    echo "FAIL_AUTH"
  elif [[ "${http_code}" == "404" || "${http_code}" == "405" ]]; then
    echo "FAIL_PATH"
  elif [[ "${http_code}" == "000" ]]; then
    echo "FAIL_NETWORK"
  else
    echo "FAIL_OTHER"
  fi
}
# Validate required inputs and tools before any network activity.
require_var API_BASE_URL
require_var OWNER_BEARER_TOKEN
require_bin curl
require_bin jq
# Tunables with safe defaults; all overridable from the env file.
MINIMAX_SMOKE_PATH="${MINIMAX_SMOKE_PATH:-/v1/messages}"
MINIMAX_SMOKE_MODEL="${MINIMAX_SMOKE_MODEL:-minimax-smoke-model}"
MINIMAX_TIMEOUT_SECONDS="${MINIMAX_TIMEOUT_SECONDS:-20}"
MINIMAX_SMOKE_DRY_RUN="${MINIMAX_SMOKE_DRY_RUN:-0}"
TARGET_URL="$(join_url "${API_BASE_URL}" "${MINIMAX_SMOKE_PATH}")"
ART_DIR="${ART_DIR_BASE}/minimax_smoke_${TS}"
mkdir -p "${ART_DIR}"
BASE_RESP_FILE="${ART_DIR}/01_base_probe_body.txt"
BASE_ERR_FILE="${ART_DIR}/01_base_probe_stderr.log"
ACTIVE_RESP_FILE="${ART_DIR}/02_active_probe_body.json"
ACTIVE_ERR_FILE="${ART_DIR}/02_active_probe_stderr.log"
REPORT_FILE="${OUT_DIR}/minimax_upstream_smoke_${TS}.md"
LOG_FILE="${OUT_DIR}/minimax_upstream_smoke_${TS}.log"
echo "[INFO] minimax smoke started ts=${TS}" | tee "${LOG_FILE}"
echo "[INFO] env_file=${ENV_FILE}" | tee -a "${LOG_FILE}"
echo "[INFO] api_base_url=${API_BASE_URL}" | tee -a "${LOG_FILE}"
echo "[INFO] target_url=${TARGET_URL}" | tee -a "${LOG_FILE}"
echo "[INFO] dry_run=${MINIMAX_SMOKE_DRY_RUN}" | tee -a "${LOG_FILE}"
# Dry-run mode: emit the report shell without touching the network, then exit
# 0 so pipeline plumbing can be validated offline.
if [[ "${MINIMAX_SMOKE_DRY_RUN}" == "1" ]]; then
  {
    echo "# Minimax 上游 Smoke 报告"
    echo
    echo "- 时间戳:${TS}"
    echo "- 执行脚本:\`scripts/supply-gate/minimax_upstream_smoke.sh\`"
    echo "- 环境文件:\`${ENV_FILE}\`"
    echo "- API_BASE_URL\`${API_BASE_URL}\`"
    echo "- 目标路径:\`${MINIMAX_SMOKE_PATH}\`"
    echo "- 探测 URL\`${TARGET_URL}\`"
    echo "- 总体结论:**PASS_DRY_RUN**"
    echo
    echo "## 1. 说明"
    echo
    echo "- 本次为 dry-run未发起任何外部网络请求。"
    echo "- 用于流水联调与产物校验,不可替代真实上游验证证据。"
  } > "${REPORT_FILE}"
  {
    echo "[INFO] report=${REPORT_FILE}"
    echo "[RESULT] PASS_DRY_RUN"
  } | tee -a "${LOG_FILE}"
  exit 0
fi
# Base probe: plain GET on the API base URL; only connectivity is judged, the
# status code itself is not classified.
BASE_HTTP_CODE="000"
BASE_RC=0
BASE_HTTP_CODE="$(curl -sS -m "${MINIMAX_TIMEOUT_SECONDS}" \
  -o "${BASE_RESP_FILE}" \
  -w '%{http_code}' \
  "${API_BASE_URL}" 2>"${BASE_ERR_FILE}")" || BASE_RC=$?
# Active probe: authenticated 1-token message request; jq builds the JSON
# payload so the model name is safely quoted.
ACTIVE_HTTP_CODE="000"
ACTIVE_RC=0
ACTIVE_PAYLOAD_FILE="${ART_DIR}/02_active_probe_request.json"
jq -n \
  --arg model "${MINIMAX_SMOKE_MODEL}" \
  '{model:$model,max_tokens:1,messages:[{role:"user",content:"ping"}]}' > "${ACTIVE_PAYLOAD_FILE}"
ACTIVE_HTTP_CODE="$(curl -sS -m "${MINIMAX_TIMEOUT_SECONDS}" \
  -o "${ACTIVE_RESP_FILE}" \
  -w '%{http_code}' \
  -X POST "${TARGET_URL}" \
  -H "Authorization: Bearer ${OWNER_BEARER_TOKEN}" \
  -H "Content-Type: application/json" \
  -H "anthropic-version: 2023-06-01" \
  --data @"${ACTIVE_PAYLOAD_FILE}" 2>"${ACTIVE_ERR_FILE}")" || ACTIVE_RC=$?
# Base classification: network-level only (curl error or 000 => FAIL_NETWORK).
BASE_CLASS="PASS_CONNECTIVITY"
if [[ "${BASE_RC}" -ne 0 ]]; then
  BASE_CLASS="FAIL_NETWORK"
elif [[ "${BASE_HTTP_CODE}" == "000" ]]; then
  BASE_CLASS="FAIL_NETWORK"
fi
# Active classification: curl failure wins, otherwise map the status code.
ACTIVE_CLASS="FAIL_OTHER"
if [[ "${ACTIVE_RC}" -ne 0 ]]; then
  ACTIVE_CLASS="FAIL_NETWORK"
else
  ACTIVE_CLASS="$(classify_http_code "${ACTIVE_HTTP_CODE}")"
fi
# Overall verdict: any FAIL_* classification fails the smoke.
OVERALL="PASS"
if [[ "${BASE_CLASS}" == FAIL_* ]] || [[ "${ACTIVE_CLASS}" == FAIL_* ]]; then
  OVERALL="FAIL"
fi
# Render the markdown probe report (Chinese text is part of the contract).
{
  echo "# Minimax 上游 Smoke 报告"
  echo
  echo "- 时间戳:${TS}"
  echo "- 执行脚本:\`scripts/supply-gate/minimax_upstream_smoke.sh\`"
  echo "- 环境文件:\`${ENV_FILE}\`"
  echo "- API_BASE_URL\`${API_BASE_URL}\`"
  echo "- 目标路径:\`${MINIMAX_SMOKE_PATH}\`"
  echo "- 探测 URL\`${TARGET_URL}\`"
  echo "- 总体结论:**${OVERALL}**"
  echo
  echo "## 1. Base 连通探测"
  echo
  echo "- curl rc${BASE_RC}"
  echo "- http_code${BASE_HTTP_CODE}"
  echo "- 分类:**${BASE_CLASS}**"
  echo "- 产物:\`${BASE_RESP_FILE}\` / \`${BASE_ERR_FILE}\`"
  echo
  echo "## 2. Active 鉴权探测"
  echo
  echo "- curl rc${ACTIVE_RC}"
  echo "- http_code${ACTIVE_HTTP_CODE}"
  echo "- 分类:**${ACTIVE_CLASS}**"
  echo "- 产物:\`${ACTIVE_PAYLOAD_FILE}\` / \`${ACTIVE_RESP_FILE}\` / \`${ACTIVE_ERR_FILE}\`"
  echo
  echo "## 3. 判定规则"
  echo
  echo "1. Base 探测仅判断连通curl 成功且非 \`000\` 记为 \`PASS_CONNECTIVITY\`。"
  echo "2. Active 探测 \`2xx\` => PASS请求成功。"
  echo "3. Active 探测 \`400/422/429\` => PASS_AUTH_REACHED已到达业务层通常说明鉴权头被接收。"
  echo "4. Active 探测 \`401/403\` => FAIL_AUTH鉴权失败。"
  echo "5. Active 探测 \`404/405\` => FAIL_PATH路径或方法不匹配。"
  echo "6. 任一探测 \`000\` 或 curl 非零 => FAIL_NETWORK网络/解析/连接失败)。"
} > "${REPORT_FILE}"
{
  echo "[INFO] report=${REPORT_FILE}"
  echo "[INFO] base_http=${BASE_HTTP_CODE} active_http=${ACTIVE_HTTP_CODE}"
  echo "[RESULT] ${OVERALL}"
} | tee -a "${LOG_FILE}"
if [[ "${OVERALL}" == "FAIL" ]]; then
  exit 1
fi

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
#
# Staging precheck and full SUP run:
#   1. validate the env file and required variables (placeholders rejected)
#   2. optional M-021 token runtime readiness precheck
#   3. optional TOK-005 boundary dry-run gate
#   4. probe API reachability, then execute run_all.sh against staging
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ENV_FILE="${1:-${SCRIPT_DIR}/.env}"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
OUT_DIR="${ROOT_DIR}/reports/gates"
mkdir -p "${OUT_DIR}"
TS="$(date +%F_%H%M%S)"
LOG_FILE="${OUT_DIR}/staging_run_${TS}.log"
# Fix: sibling gate scripts verify the env file exists before sourcing it;
# without this guard a missing file aborted via set -e with an opaque error.
if [[ ! -f "${ENV_FILE}" ]]; then
  echo "[FAIL] env file not found: ${ENV_FILE}"
  exit 1
fi
# shellcheck disable=SC1090
source "${ENV_FILE}"
ENABLE_TOK005_DRYRUN="${ENABLE_TOK005_DRYRUN:-1}"
ENABLE_M021_PRECHECK="${ENABLE_M021_PRECHECK:-1}"
# All four credentials must be present and real (no placeholder values).
required=(API_BASE_URL OWNER_BEARER_TOKEN VIEWER_BEARER_TOKEN ADMIN_BEARER_TOKEN)
for v in "${required[@]}"; do
  if [[ -z "${!v:-}" ]]; then
    echo "[FAIL] missing env var: ${v}"
    exit 1
  fi
done
for t in "${OWNER_BEARER_TOKEN}" "${VIEWER_BEARER_TOKEN}" "${ADMIN_BEARER_TOKEN}"; do
  if [[ "${t}" == replace-me-* ]]; then
    echo "[FAIL] placeholder token detected; please fill real short-lived token"
    exit 1
  fi
done
if [[ "${API_BASE_URL}" == *"staging.example.com"* ]]; then
  echo "[FAIL] placeholder API_BASE_URL detected: ${API_BASE_URL}"
  exit 1
fi
echo "[INFO] precheck pass, API_BASE_URL=${API_BASE_URL}" | tee "${LOG_FILE}"
if [[ "${ENABLE_M021_PRECHECK}" == "1" ]]; then
  echo "[INFO] run M-021 token runtime readiness precheck" | tee -a "${LOG_FILE}"
  bash "${ROOT_DIR}/scripts/ci/token_runtime_readiness_check.sh" "$(date +%F)" | tee -a "${LOG_FILE}"
else
  echo "[INFO] skip M-021 precheck by ENABLE_M021_PRECHECK=${ENABLE_M021_PRECHECK}" | tee -a "${LOG_FILE}"
fi
if [[ "${ENABLE_TOK005_DRYRUN}" == "1" ]]; then
  echo "[INFO] run TOK-005 dry-run gate first" | tee -a "${LOG_FILE}"
  bash "${SCRIPT_DIR}/tok005_boundary_dryrun.sh" "${ENV_FILE}" | tee -a "${LOG_FILE}"
else
  echo "[INFO] skip TOK-005 dry-run gate by ENABLE_TOK005_DRYRUN=${ENABLE_TOK005_DRYRUN}" | tee -a "${LOG_FILE}"
fi
# NOTE(review): HEAD (-I) is only a reachability probe; some servers reject
# HEAD requests — confirm the staging gateway accepts it.
if ! curl -sS -m 5 -I "${API_BASE_URL}" >/dev/null; then
  echo "[FAIL] API_BASE_URL unreachable: ${API_BASE_URL}" | tee -a "${LOG_FILE}"
  exit 1
fi
echo "[INFO] reachable, start SUP run_all" | tee -a "${LOG_FILE}"
{
  echo "== run_all begin =="
  bash "${SCRIPT_DIR}/run_all.sh" "${ENV_FILE}"
  echo "== run_all end =="
} | tee -a "${LOG_FILE}"
echo "[PASS] staging run complete: ${LOG_FILE}" | tee -a "${LOG_FILE}"

View File

@@ -0,0 +1,163 @@
#!/usr/bin/env bash
# tok005_boundary_dryrun.sh — TOK-005 credential-boundary dry-run: execute the
# token runtime Go tests plus static M-013/M-016 and case-coverage checks, and
# report staging readiness without performing any real staging calls.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
ENV_FILE="${1:-${SCRIPT_DIR}/.env}"
OUT_DIR="${ROOT_DIR}/reports/gates"
ART_ROOT="${ROOT_DIR}/tests/supply/artifacts"
TS="$(date +%F_%H%M%S)"
CASE_ID="tok005_dryrun_${TS}"
ART_DIR="${ART_ROOT}/${CASE_ID}"
REPORT_FILE="${OUT_DIR}/${CASE_ID}.md"
LOG_FILE="${OUT_DIR}/${CASE_ID}.log"
mkdir -p "${OUT_DIR}" "${ART_DIR}"
if [[ ! -f "${ENV_FILE}" ]]; then
  echo "[FAIL] env file not found: ${ENV_FILE}" | tee -a "${LOG_FILE}"
  exit 1
fi
# shellcheck disable=SC1090
source "${ENV_FILE}"
# Prefer the project-pinned Go toolchain; fall back to PATH.
GO_BIN="${ROOT_DIR}/.tools/go-current/bin/go"
if [[ ! -x "${GO_BIN}" ]]; then
  if command -v go >/dev/null 2>&1; then
    GO_BIN="$(command -v go)"
  else
    echo "[FAIL] go binary not found. expected: ${ROOT_DIR}/.tools/go-current/bin/go" | tee -a "${LOG_FILE}"
    exit 1
  fi
fi
PLATFORM_RT_DIR="${ROOT_DIR}/platform-token-runtime"
if [[ ! -d "${PLATFORM_RT_DIR}" ]]; then
  echo "[FAIL] missing runtime dir: ${PLATFORM_RT_DIR}" | tee -a "${LOG_FILE}"
  exit 1
fi
{
  echo "[INFO] TOK-005 dry-run started at ${TS}"
  echo "[INFO] go bin: ${GO_BIN}"
  "${GO_BIN}" version
} | tee "${LOG_FILE}"
# Run the Go suite in a subshell with project-local caches; set +e so a test
# failure is captured as a status instead of aborting the dry-run.
GO_TEST_STATUS="PASS"
set +e
(
  cd "${PLATFORM_RT_DIR}"
  export PATH="$(dirname "${GO_BIN}"):${PATH}"
  export GOCACHE="${ROOT_DIR}/.tools/go-cache"
  export GOPATH="${ROOT_DIR}/.tools/go"
  "${GO_BIN}" test ./...
) > "${ART_DIR}/go_test_output.txt" 2>&1
GO_TEST_RC=$?
set -e
if [[ "${GO_TEST_RC}" -ne 0 ]]; then
  GO_TEST_STATUS="FAIL"
fi
cat "${ART_DIR}/go_test_output.txt" >> "${LOG_FILE}"
# M-016: query key 外拒能力静态检查
QUERY_KEY_STATUS="PASS"
if ! grep -Eq 'disallowedQueryKeys = \[\]string\{"key", "api_key", "token"\}' \
  "${PLATFORM_RT_DIR}/internal/auth/middleware/query_key_reject_middleware.go"; then
  QUERY_KEY_STATUS="FAIL"
fi
# M-013: 敏感值不落审计(用例断言存在性)
REDACTION_STATUS="PASS"
if ! grep -q 'TestTOKAud006QueryKeyRejectedEvent' "${PLATFORM_RT_DIR}/internal/token/audit_executable_test.go"; then
  REDACTION_STATUS="FAIL"
fi
if ! grep -q 'must not contain raw query key value' "${PLATFORM_RT_DIR}/internal/token/audit_executable_test.go"; then
  REDACTION_STATUS="FAIL"
fi
# TOK-LIFE/TOK-AUD 全量可执行用例覆盖检查
CASE_COVERAGE_STATUS="PASS"
for case_id in TOKLife001 TOKLife002 TOKLife003 TOKLife004 TOKLife005 TOKLife006 TOKLife007 TOKLife008; do
  if ! grep -q "Test${case_id}" "${PLATFORM_RT_DIR}/internal/token/lifecycle_executable_test.go"; then
    CASE_COVERAGE_STATUS="FAIL"
  fi
done
for case_id in TOKAud001 TOKAud002 TOKAud003 TOKAud004 TOKAud005 TOKAud006 TOKAud007; do
  if ! grep -q "Test${case_id}" "${PLATFORM_RT_DIR}/internal/token/audit_executable_test.go"; then
    CASE_COVERAGE_STATUS="FAIL"
  fi
done
# 真实 staging 准备度(当前阶段预期为 BLOCKED
# Readiness is NO when any credential is missing or still a placeholder; this
# is informational and does not affect the dry-run verdict below.
LIVE_READY="YES"
LIVE_BLOCK_REASON=""
required=(API_BASE_URL OWNER_BEARER_TOKEN VIEWER_BEARER_TOKEN ADMIN_BEARER_TOKEN)
for v in "${required[@]}"; do
  if [[ -z "${!v:-}" ]]; then
    LIVE_READY="NO"
    LIVE_BLOCK_REASON="missing ${v}"
    break
  fi
done
if [[ "${LIVE_READY}" == "YES" ]]; then
  for t in "${OWNER_BEARER_TOKEN}" "${VIEWER_BEARER_TOKEN}" "${ADMIN_BEARER_TOKEN}"; do
    if [[ "${t}" == replace-me-* ]]; then
      LIVE_READY="NO"
      LIVE_BLOCK_REASON="placeholder token detected"
      break
    fi
  done
fi
if [[ "${LIVE_READY}" == "YES" && "${API_BASE_URL}" == *"example.com"* ]]; then
  LIVE_READY="NO"
  LIVE_BLOCK_REASON="placeholder API_BASE_URL detected"
fi
# Render the dry-run report (Chinese text is part of the report contract).
cat > "${REPORT_FILE}" <<EOF
# TOK-005 凭证边界 Dry-Run 报告
- 时间戳:${TS}
- 环境文件:${ENV_FILE}
- 用途:开发阶段预联调(不替代真实 staging 结论)
## 1. 结果总览
| 检查项 | 结果 | 说明 |
|---|---|---|
| Go 测试执行 | ${GO_TEST_STATUS} | \`go test ./...\` 输出见 artifacts |
| Query Key 外拒检查M-016 | ${QUERY_KEY_STATUS} | 中间件规则静态校验 |
| 审计脱敏检查M-013 | ${REDACTION_STATUS} | 审计测试中存在敏感值禁止断言 |
| TOK 用例全量可执行覆盖 | ${CASE_COVERAGE_STATUS} | TOK-LIFE-001~008 / TOK-AUD-001~007 |
| staging 实测就绪性 | ${LIVE_READY} | ${LIVE_BLOCK_REASON:-ready} |
## 2. 证据路径
1. \`${ART_DIR}/go_test_output.txt\`
2. \`${LOG_FILE}\`
## 3. 判定
1. Dry-run 通过条件:
   1. Go 测试执行=PASS
   2. Query Key 外拒检查=PASS
   3. 审计脱敏检查=PASS
   4. TOK 用例全量可执行覆盖=PASS
2. staging 就绪性为 NO 时,仅表示“真实联调暂不可启动”,不影响开发阶段 dry-run 结论。
EOF
# Dry-run verdict: all four executable/static checks must PASS; staging
# readiness is deliberately excluded.
RESULT="PASS"
if [[ "${GO_TEST_STATUS}" != "PASS" || "${QUERY_KEY_STATUS}" != "PASS" || "${REDACTION_STATUS}" != "PASS" || "${CASE_COVERAGE_STATUS}" != "PASS" ]]; then
  RESULT="FAIL"
fi
{
  echo "[INFO] report: ${REPORT_FILE}"
  echo "[INFO] artifact: ${ART_DIR}"
  echo "[RESULT] ${RESULT}"
} | tee -a "${LOG_FILE}"
if [[ "${RESULT}" != "PASS" ]]; then
  exit 1
fi

View File

@@ -0,0 +1,217 @@
#!/usr/bin/env bash
# tok006_gate_bundle.sh — aggregate the TOK-005 dry-run and SUP gate
# reports into a single TOK-006 release-decision bundle (markdown
# report + log under reports/gates/).
# Usage: tok006_gate_bundle.sh [ENV_FILE]  (default: <script dir>/.env)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Env file comes from the first argument, falling back to .env next to
# the script.
ENV_FILE="${1:-${SCRIPT_DIR}/.env}"
OUT_DIR="${ROOT_DIR}/reports/gates"
# Timestamp keys every artifact of this run (report, log, stdout dumps).
TS="$(date +%F_%H%M%S)"
BUNDLE_ID="tok006_gate_bundle_${TS}"
REPORT_FILE="${OUT_DIR}/${BUNDLE_ID}.md"
LOG_FILE="${OUT_DIR}/${BUNDLE_ID}.log"
mkdir -p "${OUT_DIR}"
if [[ ! -f "${ENV_FILE}" ]]; then
# NOTE(review): this diagnostic goes to stdout; `>&2` would be the
# conventional stream for an error message — confirm before changing,
# callers may capture stdout.
echo "[FAIL] env file not found: ${ENV_FILE}"
exit 1
fi
# shellcheck disable=SC1090
source "${ENV_FILE}"
# Feature switches, read after sourcing so the env file can override
# them: TOK-005 dry-run defaults ON, SUP suite defaults OFF.
ENABLE_TOK005_DRYRUN="${ENABLE_TOK005_DRYRUN:-1}"
ENABLE_SUP_RUN="${ENABLE_SUP_RUN:-0}"
# Append one line to the bundle log while echoing it to stdout.
# Uses printf '%s\n' instead of echo so messages that begin with '-'
# (e.g. "-n") or contain backslashes are emitted verbatim rather than
# being interpreted as echo options/escapes.
log() {
  local msg="$1"
  printf '%s\n' "${msg}" | tee -a "${LOG_FILE}"
}
# Expand a glob pattern and print the most recently modified match, or
# the empty string when nothing matches. Replaces the fragile
# `ls -1t ${pattern} | head -1` idiom (parsing ls output breaks on
# filenames containing newlines — ShellCheck SC2012); `-nt` compares
# modification times directly.
latest_file_or_empty() {
  local pattern="$1"
  local newest=""
  local candidate
  # shellcheck disable=SC2086 -- pattern is intentionally unquoted so it globs
  for candidate in ${pattern}; do
    [[ -e "${candidate}" ]] || continue
    if [[ -z "${newest}" || "${candidate}" -nt "${newest}" ]]; then
      newest="${candidate}"
    fi
  done
  echo "${newest}"
}
# Derive a gate status from a report file:
#   BLOCKED — path empty or file missing (or no verdict found),
#   FAIL    — the report contains the word FAIL anywhere (conservative:
#             FAIL wins even if PASS also appears),
#   PASS    — otherwise, if the word PASS appears.
status_from_report() {
  local report_path="$1"
  if [[ -z "${report_path}" || ! -f "${report_path}" ]]; then
    echo "BLOCKED"
  elif grep -Eq '\bFAIL\b' "${report_path}"; then
    echo "FAIL"
  elif grep -Eq '\bPASS\b' "${report_path}"; then
    echo "PASS"
  else
    echo "BLOCKED"
  fi
}
# Classify which environment produced a report's evidence:
#   "-"       — path empty or file missing,
#   "mock"    — the text mentions mock (checked first, so mock wins
#               over staging),
#   "staging" — the text mentions staging,
#   "unknown" — no environment marker found.
env_from_report() {
  local report_path="$1"
  if [[ -z "${report_path}" || ! -f "${report_path}" ]]; then
    echo "-"
    return
  fi
  local detected="unknown"
  if grep -Eiq 'local-mock|mock' "${report_path}"; then
    detected="mock"
  elif grep -Eiq 'staging' "${report_path}"; then
    detected="staging"
  fi
  echo "${detected}"
}
# Pull the "staging 实测就绪性" row out of a TOK-005 dry-run report and
# print it as "READY|REASON" (e.g. "NO|missing API_BASE_URL").
# Prints "UNKNOWN|<why>" when the report or the table row is missing,
# and substitutes UNKNOWN when the readiness cell is empty.
extract_tok005_staging_readiness() {
  local report_path="$1"
  if [[ -z "${report_path}" || ! -f "${report_path}" ]]; then
    echo "UNKNOWN|tok005 report missing"
    return
  fi
  local matched_row
  matched_row="$(grep -E '^\| staging 实测就绪性 \|' "${report_path}" | head -n 1 || true)"
  if [[ -z "${matched_row}" ]]; then
    echo "UNKNOWN|staging readiness row missing"
    return
  fi
  # Field 3 is the readiness cell (all whitespace stripped); field 4 is
  # the reason cell (trimmed at both ends only).
  local ready_flag reason_text
  ready_flag="$(awk -F'|' '{gsub(/[[:space:]]/, "", $3); print $3}' <<< "${matched_row}")"
  reason_text="$(awk -F'|' '{gsub(/^[[:space:]]+|[[:space:]]+$/, "", $4); print $4}' <<< "${matched_row}")"
  echo "${ready_flag:-UNKNOWN}|${reason_text}"
}
# Aggregate flags across all gates; flipped while scanning the per-gate
# statuses/environments further down.
any_fail=0
any_blocked=0
any_mock=0
log "[INFO] TOK-006 gate bundle started at ${TS}"
log "[INFO] env file: ${ENV_FILE}"
log "[INFO] ENABLE_TOK005_DRYRUN=${ENABLE_TOK005_DRYRUN}, ENABLE_SUP_RUN=${ENABLE_SUP_RUN}"
# Optionally (re)run the TOK-005 boundary dry-run so its report is
# fresh. The set +e / set -e bracketing keeps a failing sub-script from
# aborting the bundle: its rc is logged, and its outcome is picked up
# later via status_from_report on the generated report.
TOK005_STDOUT_LOG="${OUT_DIR}/${BUNDLE_ID}_tok005.stdout.log"
if [[ "${ENABLE_TOK005_DRYRUN}" == "1" ]]; then
set +e
bash "${SCRIPT_DIR}/tok005_boundary_dryrun.sh" "${ENV_FILE}" > "${TOK005_STDOUT_LOG}" 2>&1
tok005_rc=$?
set -e
log "[INFO] TOK-005 dry-run executed with rc=${tok005_rc}, stdout=${TOK005_STDOUT_LOG}"
else
log "[INFO] TOK-005 dry-run skipped by switch"
fi
# Optionally run the full SUP suite (off by default; same non-aborting
# pattern as above).
if [[ "${ENABLE_SUP_RUN}" == "1" ]]; then
set +e
bash "${SCRIPT_DIR}/run_all.sh" "${ENV_FILE}" > "${OUT_DIR}/${BUNDLE_ID}_sup_run_all.stdout.log" 2>&1
sup_run_rc=$?
set -e
log "[INFO] SUP run_all executed with rc=${sup_run_rc}"
else
log "[INFO] SUP run_all skipped by switch"
fi
# Locate the newest report for each gate; an empty result here makes
# the gate BLOCKED below.
TOK005_REPORT="$(latest_file_or_empty "${OUT_DIR}/tok005_dryrun_*.md")"
SUP004_REPORT="$(latest_file_or_empty "${ROOT_DIR}/tests/supply/ui_sup_acc_report_*.md")"
SUP005_REPORT="$(latest_file_or_empty "${ROOT_DIR}/tests/supply/ui_sup_pkg_report_*.md")"
SUP006_REPORT="$(latest_file_or_empty "${ROOT_DIR}/tests/supply/ui_sup_set_report_*.md")"
SUP007_REPORT="$(latest_file_or_empty "${ROOT_DIR}/tests/supply/sec_sup_boundary_report_*.md")"
# Per-gate verdict: FAIL if the report mentions FAIL, else PASS if it
# mentions PASS, else BLOCKED (see status_from_report).
TOK005_STATUS="$(status_from_report "${TOK005_REPORT}")"
SUP004_STATUS="$(status_from_report "${SUP004_REPORT}")"
SUP005_STATUS="$(status_from_report "${SUP005_REPORT}")"
SUP006_STATUS="$(status_from_report "${SUP006_REPORT}")"
SUP007_STATUS="$(status_from_report "${SUP007_REPORT}")"
# Per-gate evidence environment: mock / staging / unknown / "-".
TOK005_ENV="$(env_from_report "${TOK005_REPORT}")"
SUP004_ENV="$(env_from_report "${SUP004_REPORT}")"
SUP005_ENV="$(env_from_report "${SUP005_REPORT}")"
SUP006_ENV="$(env_from_report "${SUP006_REPORT}")"
SUP007_ENV="$(env_from_report "${SUP007_REPORT}")"
# Fold the per-gate results into the aggregate flags consumed by the
# decision logic.
for status in "${TOK005_STATUS}" "${SUP004_STATUS}" "${SUP005_STATUS}" "${SUP006_STATUS}" "${SUP007_STATUS}"; do
if [[ "${status}" == "FAIL" ]]; then
any_fail=1
fi
if [[ "${status}" == "BLOCKED" ]]; then
any_blocked=1
fi
done
for env_name in "${TOK005_ENV}" "${SUP004_ENV}" "${SUP005_ENV}" "${SUP006_ENV}" "${SUP007_ENV}"; do
if [[ "${env_name}" == "mock" ]]; then
any_mock=1
fi
done
# Staging readiness verdict from the TOK-005 report table, delivered as
# "READY|REASON" and split on the first '|'.
readiness_pair="$(extract_tok005_staging_readiness "${TOK005_REPORT}")"
TOK005_STAGING_READY="${readiness_pair%%|*}"
TOK005_STAGING_REASON="${readiness_pair#*|}"
# Release verdict: NO_GO dominates (any FAIL/BLOCKED gate); GO requires
# every gate passing with non-mock evidence plus staging readiness;
# anything in between is CONDITIONAL_GO.
if [[ "${any_fail}" -ne 1 && "${any_blocked}" -ne 1 ]]; then
  if [[ "${TOK005_STAGING_READY}" == "YES" && "${any_mock}" -eq 0 ]]; then
    DECISION="GO"
    DECISION_REASON="all gates pass with non-mock evidence and staging readiness is YES"
  else
    DECISION="CONDITIONAL_GO"
    DECISION_REASON="all gates pass but include mock evidence or staging readiness is not YES"
  fi
else
  DECISION="NO_GO"
  DECISION_REASON="at least one gate failed or blocked"
fi
# Render the TOK-006 bundle markdown report in one shot. The heredoc
# delimiter is unquoted so ${VAR} references expand at write time;
# backticks are escaped (\`) so they land literally in the markdown.
cat > "${REPORT_FILE}" <<EOF
# TOK-006 统一 Gate 汇总报告
- 时间戳:${TS}
- 执行入口:\`scripts/supply-gate/tok006_gate_bundle.sh\`
- 环境文件:${ENV_FILE}
## 1. Gate 矩阵
| Gate | 状态 | 环境 | 证据 |
|---|---|---|---|
| TOK-005 dry-run | ${TOK005_STATUS} | ${TOK005_ENV} | ${TOK005_REPORT:-N/A} |
| SUP-004 账号挂载 | ${SUP004_STATUS} | ${SUP004_ENV} | ${SUP004_REPORT:-N/A} |
| SUP-005 套餐发布 | ${SUP005_STATUS} | ${SUP005_ENV} | ${SUP005_REPORT:-N/A} |
| SUP-006 结算提现 | ${SUP006_STATUS} | ${SUP006_ENV} | ${SUP006_REPORT:-N/A} |
| SUP-007 边界专项 | ${SUP007_STATUS} | ${SUP007_ENV} | ${SUP007_REPORT:-N/A} |
## 2. 关键约束检查
| 项目 | 值 | 说明 |
|---|---|---|
| TOK-005 staging readiness | ${TOK005_STAGING_READY} | ${TOK005_STAGING_REASON} |
| 是否存在 FAIL | ${any_fail} | 1=是, 0=否 |
| 是否存在 BLOCKED | ${any_blocked} | 1=是, 0=否 |
| 是否包含 mock 证据 | ${any_mock} | 1=是, 0=否 |
## 3. 发布判定(单页)
- 判定:**${DECISION}**
- 判定依据:${DECISION_REASON}
- 说明:
- GO全部 gate 通过,且非 mock且 staging readiness=YES。
- CONDITIONAL_GO全部 gate 通过,但存在 mock 证据或 staging readiness!=YES。
- NO_GO存在 FAIL/BLOCKED。
## 4. 下一步动作
1. 若判定为 CONDITIONAL_GO/NO_GO优先补齐真实 staging 参数并执行:
\`bash scripts/supply-gate/staging_precheck_and_run.sh scripts/supply-gate/.env\`
2. 联调完成后回填:
\`tests/supply/sec_sup_boundary_report_2026-03-30.md\`、\`reports/supply_gate_review_2026-03-31.md\`。
EOF
log "[INFO] bundle report generated: ${REPORT_FILE}"
log "[RESULT] ${DECISION}"
if [[ "${DECISION}" == "NO_GO" ]]; then
exit 1
fi