#!/usr/bin/env python3
"""
生成 AI 应用场景每日简报
"""

import json
import sqlite3
from collections import Counter
from datetime import datetime, timedelta

# 数据库路径
DB_PATH = "/root/.openclaw/workspace/projects/ai_usecase_collector/data/ai_usecases.db"
OUTPUT_PATH = "/root/vault/obsidian_vault/obsidian/Documents/obsidian/CLAW/AI应用场景/2026-03-20-每日简报.md"

def get_database_stats():
    """Collect aggregate statistics from the scenes database.

    Returns:
        dict with keys:
            total (int): total number of rows in ``scenes``.
            today_new (int): rows whose ``discovery_date`` is today (local date).
            innovation_dist (dict[str, int]): row counts bucketed by
                innovation level (labels produced by the SQL CASE below).
            category_dist (list[tuple]): top-10 ``(categories, count)`` pairs.
            recent_trend (list[tuple]): per-day ``(date, count)`` pairs for
                the last 30 days, newest first.
    """
    conn = sqlite3.connect(DB_PATH)
    try:
        cursor = conn.cursor()

        # Total number of scenes.
        cursor.execute("SELECT COUNT(*) FROM scenes")
        total = cursor.fetchone()[0]

        # Scenes discovered today (compared on the DATE() of discovery_date).
        today = datetime.now().strftime('%Y-%m-%d')
        cursor.execute("SELECT COUNT(*) FROM scenes WHERE DATE(discovery_date) = ?", (today,))
        today_new = cursor.fetchone()[0]

        # Distribution of innovation scores, bucketed into three labelled levels.
        cursor.execute("""
            SELECT
                CASE
                    WHEN innovation_score >= 9 THEN '高创新 (9-10)'
                    WHEN innovation_score >= 7 THEN '中创新 (7-8)'
                    ELSE '基础创新 (6及以下)'
                END as level,
                COUNT(*) as count
            FROM scenes
            GROUP BY level
        """)
        innovation_dist = {row[0]: row[1] for row in cursor.fetchall()}

        # Top-10 category distribution by raw `categories` value.
        cursor.execute("""
            SELECT categories, COUNT(*) as count
            FROM scenes
            GROUP BY categories
            ORDER BY count DESC
            LIMIT 10
        """)
        category_dist = cursor.fetchall()

        # Daily additions over the last 30 days, newest day first.
        cursor.execute("""
            SELECT DATE(discovery_date) as date, COUNT(*) as count
            FROM scenes
            WHERE discovery_date >= date('now', '-30 days')
            GROUP BY DATE(discovery_date)
            ORDER BY date DESC
        """)
        recent_trend = cursor.fetchall()

        return {
            'total': total,
            'today_new': today_new,
            'innovation_dist': innovation_dist,
            'category_dist': category_dist,
            'recent_trend': recent_trend
        }
    finally:
        # Release the connection even if a query fails
        # (the original leaked it on any exception).
        conn.close()

def get_top_scenes(limit=10):
    """Fetch the highest-value scenes from the database.

    Args:
        limit: maximum number of scenes to return (default 10).

    Returns:
        list[dict]: scene records ordered by ``innovation_score`` descending,
        then ``discovery_date`` descending. Each dict has the keys listed in
        ``columns`` below.
    """
    # Dict keys, in the same order as the SELECT column list below.
    columns = ('title', 'description', 'categories', 'tags', 'key_technologies',
               'innovation_score', 'source', 'url', 'date')

    conn = sqlite3.connect(DB_PATH)
    try:
        cursor = conn.cursor()
        cursor.execute("""
            SELECT title, description, categories, tags, key_technologies,
                   innovation_score, source_platform, original_url, discovery_date
            FROM scenes
            ORDER BY innovation_score DESC, discovery_date DESC
            LIMIT ?
        """, (limit,))
        return [dict(zip(columns, row)) for row in cursor.fetchall()]
    finally:
        # Close even on query failure (the original leaked the connection).
        conn.close()

def get_recent_scenes(days=7, limit=15):
    """Fetch scenes discovered within the last *days* days.

    Args:
        days: look-back window in days (default 7).
        limit: maximum number of scenes to return (default 15).

    Returns:
        list[dict]: scene records ordered by ``discovery_date`` descending,
        then ``innovation_score`` descending. Each dict has the keys listed
        in ``columns`` below.
    """
    # Dict keys, in the same order as the SELECT column list below.
    columns = ('title', 'description', 'categories', 'tags', 'key_technologies',
               'innovation_score', 'source', 'url', 'date')

    conn = sqlite3.connect(DB_PATH)
    try:
        cursor = conn.cursor()
        # The window is passed as a SQLite date modifier, e.g. '-7 days'.
        cursor.execute("""
            SELECT title, description, categories, tags, key_technologies,
                   innovation_score, source_platform, original_url, discovery_date
            FROM scenes
            WHERE discovery_date >= date('now', ?)
            ORDER BY discovery_date DESC, innovation_score DESC
            LIMIT ?
        """, (f'-{days} days', limit))
        return [dict(zip(columns, row)) for row in cursor.fetchall()]
    finally:
        # Close even on query failure (the original leaked the connection).
        conn.close()

def get_trending_keywords():
    """提取热门关键词"""
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # 从 tags 和 key_technologies 中提取热门词汇
    cursor.execute("""
        SELECT tags, key_technologies
        FROM scenes
        WHERE discovery_date >= date('now', '-14 days')
    """)

    all_keywords = {}
    for row in cursor.fetchall():
        tags = row[0] or ''
        tech = row[1] or ''

        # 处理 tags
        if tags:
            for tag in tags.split(','):
                tag = tag.strip().lower()
                if tag:
                    all_keywords[tag] = all_keywords.get(tag, 0) + 1

        # 处理 key_technologies
        if tech:
            for t in tech.split(','):
                t = t.strip().lower()
                if t:
                    all_keywords[t] = all_keywords.get(t, 0) + 1

    conn.close()

    # 按频率排序
    sorted_keywords = sorted(all_keywords.items(), key=lambda x: x[1], reverse=True)
    return sorted_keywords[:15]

def generate_markdown_report():
    """生成 Markdown 格式的报告"""
    today = datetime.now().strftime('%Y年%m月%d日')
    date_slug = datetime.now().strftime('%Y-%m-%d')

    # 获取数据
    stats = get_database_stats()
    top_scenes = get_top_scenes(10)
    recent_scenes = get_recent_scenes(7, 10)
    trending_keywords = get_trending_keywords()

    # 生成报告
    report = f"""# AI 应用场景每日简报

**日期**: {today}
**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

---

## 📊 数据概览

- **总场景数**: {stats['total']} 个
- **今日新增**: {stats['today_new']} 个
- **最近30天活跃度**: {len(stats['recent_trend'])} 天有新增

### 创新分数分布
"""
    for level, count in sorted(stats['innovation_dist'].items(), key=lambda x: x[0], reverse=True):
        report += f"- **{level}**: {count} 个\n"

    report += f"""
### 热门类别 (Top 5)
"""
    for i, (cat, count) in enumerate(stats['category_dist'][:5], 1):
        report += f"{i}. **{cat}** ({count} 个)\n"

    report += f"""
---

## 🔥 今日新发现高价值场景

以下是今天收集到的高创新分数场景：
"""
    for scene in recent_scenes[:3]:
        score_emoji = '🔥' if scene['innovation_score'] >= 10 else '⭐' if scene['innovation_score'] >= 9 else '✨'
        report += f"""
### {score_emoji} {scene['title']}

**创新分数**: {scene['innovation_score']}/10  |  **来源**: {scene['source']}  |  **发现时间**: {scene['date'][:10]}

**类别**: {scene['categories']}

**核心描述**: {scene['description'][:200]}...

**关键标签**: `{scene['tags']}`

**关键技术**: `{scene['key_technologies']}`

🔗 [查看原文]({scene['url']})

---

"""

    report += f"""
---

## 🏆 全局高价值场景 Top 5

"""
    for i, scene in enumerate(top_scenes[:5], 1):
        score_emoji = '🔥' if scene['innovation_score'] >= 10 else '⭐' if scene['innovation_score'] >= 9 else '✨'
        report += f"""
**{i}. {score_emoji} {scene['title']}** ({scene['innovation_score']}/10)
> {scene['description'][:120]}...
> *类别: {scene['categories']} | 来源: {scene['source']}*

---

"""

    report += f"""
---

##📈 热门关键词趋势

以下是最近两周出现频率最高的关键词和技术标签：

"""
    keywords_md = []
    for i, (keyword, count) in enumerate(trending_keywords, 1):
        keywords_md.append(f"`{keyword}`({count})")

    report += " ".join(keywords_md[:10]) + "\n"

    report += f"""

---

## 💡 核心趋势洞察

基于最近收集的数据，可以观察到以下关键趋势：

### 1. **从单点代理到多代理协作**
单用途代理时代已经结束。企业正转向多代理系统（如 CrewAI），多个专业化代理协同工作完成复杂任务。这是 2026 年最显著的趋势之一。

### 2. **治理优先于速度**
Agentic AI 的部署不再关注"谁能最先"，而是关注"谁能控制"。治理、可观察性和人类监督成为企业采用 Agentic AI 的核心考虑因素。

### 3. **跨行业深度渗透**
从医疗保健的语音记录（节省 $1M+/年）到制造业的自主维护调度，AI 代理正在各个垂直行业的核心工作流程中创造实际价值。

### 4. **编排成为关键**
2026 年是编排被广泛认可为规模化 AI 的连接组织的一年。Agentic 编排让智能代理在工作流中协调决策和行动，而非孤立运行。

### 5. **自改进代理的兴起**
自改进 AI 代理系统开始从研究走向实践。这些系统不仅执行任务，还能自主学习和优化，代表下一代 AI 能力。

---

## 📂 数据管理信息

- **数据库路径**: `{DB_PATH}`
- **最后更新**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
- **数据源**: Tavily AI Search API

---

*本报告由 AI 场景洞察模块自动生成 | 数据库持续更新中*
"""

    return report

def save_report(report):
    """Persist the rendered report into the Obsidian vault.

    Args:
        report: full Markdown text to write to ``OUTPUT_PATH``.
    """
    import os

    # Make sure the destination directory tree exists before writing.
    target_dir = os.path.dirname(OUTPUT_PATH)
    os.makedirs(target_dir, exist_ok=True)

    with open(OUTPUT_PATH, 'w', encoding='utf-8') as handle:
        handle.write(report)

    print(f"✓ 报告已保存至: {OUTPUT_PATH}")

def main():
    """Entry point: build the daily briefing, save it, and return the text."""
    print("正在生成 AI 应用场景每日简报...\n")

    # Render, then persist to the Obsidian vault.
    report = generate_markdown_report()
    save_report(report)

    print("\n报告生成完成！")
    return report

if __name__ == "__main__":
    main()
