# wiki_crawler/backend/mcp_server.py
# MCP server exposing WikiCrawler tools (add website, check status, run crawler, search).
import sys
import os
import logging
from typing import Optional  # needed for the kb_search signature
import threading

# 1. Path compatibility (make sure the parent dir is on sys.path so the
#    `backend` package resolves when this file is run as a script)
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from mcp.server.fastmcp import FastMCP

from backend.core.logger import setup_logging
from backend.services.crawler_service import crawler_service

# 2. Initialize logging (must write to stderr)
setup_logging()
logger = logging.getLogger("mcp_server")

# 3. Initialize the MCP service
mcp = FastMCP("WikiCrawler-V3")
@mcp.tool()
async def kb_add_website(url: str) -> str:
    """
    [Admin] Input a URL to map and register a task.
    This is the first step to add a knowledge base.
    Args:
        url: The root URL of the website (e.g., https://docs.firecrawl.dev).
    Returns:
        Task ID and count of found links.
    """
    try:
        res = crawler_service.map_site(url)
        return f"Task Registered. ID: {res['task_id']}, Links Found: {res['count']}, Is New: {res['is_new']}"
    except Exception as e:
        # Tool boundary: log the full traceback, hand the LLM a readable error string.
        logger.error(f"Add website failed: {e}", exc_info=True)
        return f"Error: {e}"
@mcp.tool()
async def kb_check_status(task_id: int) -> str:
    """
    [Monitor] Check detailed progress and active threads.
    Use this to see if the crawler is still running or finished.
    Args:
        task_id: The ID of the task to check.
    Returns:
        A formatted report including progress stats and currently crawling URLs.
    """
    data = crawler_service.get_task_status(task_id)
    if not data:
        return "Task not found."
    s = data['stats']
    threads = data['active_threads']
    # Format the output so it is easy for the LLM to read
    report = (
        f"--- Task {task_id} Status ---\n"
        f"Root URL: {data['root_url']}\n"
        f"Progress: {s['completed']}/{s['total']} (Pending: {s['pending']})\n"
        f"Active Threads (Running): {len(threads)}\n"
    )
    if threads:
        # Show at most 5 in-flight URLs to keep the report short
        report += "Currently Crawling:\n" + "\n".join([f"- {t}" for t in threads[:5]])
        if len(threads) > 5:
            report += f"\n... and {len(threads)-5} more."
    return report
@mcp.tool()
async def kb_run_crawler(task_id: int, batch_size: int = 20) -> str:
    """
    [Action] Trigger the crawler in BACKGROUND mode.
    This returns immediately, so you can use 'kb_check_status' to monitor progress.
    Args:
        task_id: The ID of the task.
        batch_size: Number of URLs to process (suggest 10-20).
    Returns:
        Status message confirming start.
    """
    # 1. Wrapper function that runs in the background thread
    def background_task():
        try:
            logger.info(f"Background batch started for Task {task_id}")
            # This is a blocking call, but it now runs in its own thread
            crawler_service.process_queue_concurrent(task_id, batch_size)
            logger.info(f"Background batch finished for Task {task_id}")
        except Exception as e:
            logger.error(f"Background task failed: {e}", exc_info=True)

    # 2. Create and start the thread; daemon=True so it cannot block process exit
    thread = threading.Thread(target=background_task, daemon=True)
    thread.start()
    # 3. Return immediately without waiting for the crawl to finish
    return f"🚀 Background crawler started for Task {task_id} (Batch Size: {batch_size}). You can now check status."
@mcp.tool()
async def kb_search(query: str, task_id: Optional[int] = None, limit: int = 5) -> str:
    """
    [User] Search knowledge base with Hybrid Search & Rerank.
    Args:
        query: The user's question or search keywords.
        task_id: (Optional) Limit search to a specific task ID.
        limit: (Optional) Number of results to return (default 5).
    Returns:
        Ranked content blocks with source paths.
    """
    try:
        res = crawler_service.search(query, task_id, limit)
        results = res.get('results', [])
        if not results:
            return "No results found."
        output = []
        for i, r in enumerate(results):
            score_display = f"{r['score']:.4f}" + (" (Reranked)" if r.get('reranked') else "")
            meta = r.get('meta_info', {})
            path = meta.get('header_path', 'Root')
            # Format a single result block
            block = (
                f"[{i+1}] Score: {score_display}\n"
                f"Path: {path}\n"
                f"Content: {r['content'][:300]}..."  # truncate to avoid context overflow
            )
            output.append(block)
        return "\n\n".join(output)
    except Exception as e:
        logger.error(f"Search failed: {e}", exc_info=True)
        return f"Search Error: {e}"
if __name__ == "__main__":
    # Start the MCP service (blocks until shutdown)
    mcp.run()