v3 API reworked in RESTful style with standardized endpoints; added MCP server; added logging module

2026-01-19 23:54:29 +08:00
parent 389c13a2a7
commit 7c99e67a7f
14 changed files with 780 additions and 376 deletions


@@ -1,182 +1,179 @@
# backend/services/crawler_service.py
import concurrent.futures
import threading
from firecrawl import FirecrawlApp
from backend.core.config import settings
from backend.services.data_service import data_service
from backend.services.llm_service import llm_service
from backend.utils.text_process import text_processor
import logging
# Module-level logger
# __name__ resolves to the dotted module path, e.g. "backend.services.crawler_service"
logger = logging.getLogger(__name__)
class CrawlerService:
"""
Crawler orchestration service
Coordinates Firecrawl, the LLM service, and DataService
"""
def __init__(self):
self.firecrawl = FirecrawlApp(api_key=settings.FIRECRAWL_API_KEY)
self.max_workers = 5
# [New] In-memory tracking of URLs currently being processed
# Structure: { task_id: {url, ...} } (a set of in-flight URLs per task)
self._active_workers = {}
self._lock = threading.Lock()
def _track_start(self, task_id, url):
"""开始追踪某个URL"""
with self._lock:
if task_id not in self._active_workers:
self._active_workers[task_id] = set()
self._active_workers[task_id].add(url)
def _track_end(self, task_id, url):
"""结束追踪某个URL"""
with self._lock:
if task_id in self._active_workers:
self._active_workers[task_id].discard(url)
def get_task_status(self, task_id: int):
"""
[Monitoring] Full status = database statistics + live worker URL list
"""
# 1. Database-level statistics (macro view)
db_data = data_service.get_task_monitor_data(task_id)
if not db_data:
return None
# 2. In-memory list of URLs currently being processed (micro view)
with self._lock:
active_urls = list(self._active_workers.get(task_id, []))
# Log the current picture
logger.info(f"Task {task_id} active threads: {active_urls}")
logger.info(f"Task {task_id} stats: {db_data['db_stats']}")  # database-level statistics
return {
"root_url": db_data["root_url"],
"stats": db_data["db_stats"], # Pending, Completed, Failed 等
"active_threads": active_urls, # 当前 CPU/网络 正在处理的 URL
"active_thread_count": len(active_urls)
}
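# Example payload returned by get_task_status (shape only; the exact keys inside
# "stats" come from data_service.get_task_monitor_data and are assumed here):
# {
#     "root_url": "https://example.com",
#     "stats": {"pending": 42, "completed": 10, "failed": 1},
#     "active_threads": ["https://example.com/docs/a", "https://example.com/docs/b"],
#     "active_thread_count": 2
# }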
def map_site(self, start_url: str):
print(f"[INFO] Mapping: {start_url}")
"""阶段1站点地图扫描"""
logger.info(f"Mapping: {start_url}")
try:
# 1. Register the task
task_res = data_service.register_task(start_url)
# 2. Always queue the start_url itself, whether or not the task is new
urls_to_add = [start_url]
# 3. If the task already exists, skip the Map call and return immediately
if not task_res['is_new_task']:
logger.info(f"Task {task_res['task_id']} exists, skipping map.")
return {
"task_id": task_res['task_id'],
"count": 0,
"is_new": False
}
# New task: run Firecrawl Map
try:
map_res = self.firecrawl.map(start_url)
# Handle both SDK return types (object or dict)
found_links = map_res.get('links', []) if isinstance(map_res, dict) else getattr(map_res, 'links', [])
for link in found_links:
# Each link may be a plain string or an object with a .url attribute
u = link if isinstance(link, str) else getattr(link, 'url', str(link))
urls_to_add.append(u)
print(f"[INFO] Firecrawl Map found {len(found_links)} sub-links.")
logger.info(f"Map found {len(found_links)} links")
except Exception as e:
print(f"[WARN] Firecrawl Map warning (proceeding with seed only): {e}")
logger.warning(f"Map failed, proceeding with seed only: {e}")
# 4. Bulk-insert the URLs into the database
if urls_to_add:
data_service.add_urls(task_res['task_id'], urls_to_add)
return {
"task_id": task_res['task_id'],
"count": len(urls_to_add),
"is_new": True
}
except Exception as e:
print(f"[ERROR] Map failed: {e}")
logger.error(f"Map failed: {e}")
raise e
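# Return contract of map_site (both branches): {"task_id": int, "count": int, "is_new": bool}.
# For an already-registered task the Firecrawl Map call is skipped and count is 0.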
def process_queue(self, task_id: int, batch_size: int = 5):
"""
Process the queue sequentially: scrape -> clean -> split -> embed -> store
"""
pending = data_service.get_pending_urls(task_id, batch_size)
urls = pending['urls']
if not urls: return {"msg": "Queue empty"}
processed = 0
for url in urls:
try:
print(f"[INFO] Scraping: {url}")
# Call Firecrawl
scrape_res = self.firecrawl.scrape(
url,
formats=['markdown'],
only_main_content=True
)
# =====================================================
# [Fix] Handle both dict and object return formats
# =====================================================
raw_md = ""
metadata = None
# 1. Extract the markdown body and metadata
if isinstance(scrape_res, dict):
raw_md = scrape_res.get('markdown', '')
metadata = scrape_res.get('metadata', {})
else:
# Object form (ScrapeResponse)
raw_md = getattr(scrape_res, 'markdown', '')
metadata = getattr(scrape_res, 'metadata', {})
# 2. Safely extract the title from metadata
title = url  # fall back to the URL as the title
if metadata:
if isinstance(metadata, dict):
# metadata is a dict
title = metadata.get('title', url)
else:
# metadata is an object (DocumentMetadata)
title = getattr(metadata, 'title', url)
# =====================================================
if not raw_md:
print(f"[WARN] Empty content for {url}")
continue
# 1. Clean
clean_md = text_processor.clean_markdown(raw_md)
if not clean_md: continue
# 2. Split
chunks = text_processor.split_markdown(clean_md)
chunks_data = []
for i, chunk in enumerate(chunks):
content = chunk['content']
headers = chunk['metadata']
header_path = " > ".join(headers.values())
# 3. Embed (title + header path + content)
emb_input = f"{title}\n{header_path}\n{content}"
vector = llm_service.get_embedding(emb_input)
if vector:
chunks_data.append({
"index": i,
"content": content,
"embedding": vector,
"meta_info": {"header_path": header_path, "headers": headers}
})
# 4. Store
if chunks_data:
data_service.save_chunks(task_id, url, title, chunks_data)
processed += 1
except Exception as e:
# Log the full error to aid debugging
print(f"[ERROR] Process URL {url} failed: {e}")
return {"msg": "Batch processed", "count": processed}
def _process_single_url(self, task_id: int, url: str):
"""[Worker] Process a single URL in a worker thread"""
# 1. Mark start in memory
self._track_start(task_id, url)
logger.info(f"[THREAD START] {url}")
try:
# 2. Scrape
scrape_res = self.firecrawl.scrape(
url, formats=['markdown'], only_main_content=True
)
raw_md = getattr(scrape_res, 'markdown', '') if not isinstance(scrape_res, dict) else scrape_res.get('markdown', '')
metadata = getattr(scrape_res, 'metadata', {}) if not isinstance(scrape_res, dict) else scrape_res.get('metadata', {})
title = getattr(metadata, 'title', url) if not isinstance(metadata, dict) else metadata.get('title', url)
if not raw_md:
data_service.mark_url_status(task_id, url, 'failed')
return
# 3. Clean & split
clean_md = text_processor.clean_markdown(raw_md)
chunks = text_processor.split_markdown(clean_md)
chunks_data = []
for i, chunk in enumerate(chunks):
headers = chunk['metadata']
path = " > ".join(headers.values())
emb_input = f"{title}\n{path}\n{chunk['content']}"
vector = llm_service.get_embedding(emb_input)
if vector:
chunks_data.append({
"index": i, "content": chunk['content'], "embedding": vector,
"meta_info": {"header_path": path, "headers": headers}
})
# 4. Store (save_chunks marks the URL as completed)
if chunks_data:
data_service.save_chunks(task_id, url, title, chunks_data)
else:
data_service.mark_url_status(task_id, url, 'failed')
except Exception as e:
logger.error(f"[THREAD ERROR] {url}: {e}")
data_service.mark_url_status(task_id, url, 'failed')
finally:
# 5. Clear the in-memory marker (always, whether success or failure)
self._track_end(task_id, url)
def process_queue_concurrent(self, task_id: int, batch_size: int = 10):
"""阶段2多线程并发处理"""
urls = data_service.get_pending_urls(task_id, limit=batch_size)
if not urls: return {"msg": "No pending urls"}
logger.info(f"Batch started: {len(urls)} urls")
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
# Submit each URL to the thread pool
futures = {executor.submit(self._process_single_url, task_id, url): url for url in urls}
# Wait for all workers to finish
concurrent.futures.wait(futures)
return {"msg": "Batch completed", "count": len(urls)}
def search(self, query: str, task_id, return_num: int):
"""
全链路搜索:向量生成 -> 混合检索(粗排) -> 重排序(精排)
"""
# 1. 生成查询向量
"""阶段3搜索"""
vector = llm_service.get_embedding(query)
if not vector: return {"msg": "Embedding failed", "results": []}
# 2. Coarse recall size: at least 50, at most 10x the requested count (capped at 100)
coarse_limit = min(return_num * 10, 100)
coarse_limit = max(coarse_limit, 50)
# 3. Hybrid retrieval (coarse ranking)
coarse_res = data_service.search(query, vector, task_id, coarse_limit)
candidates = coarse_res.get('results', [])
if not candidates: return {"results": []}
# 4. Rerank (fine ranking) down to the number requested by the user
final_res = llm_service.rerank(query, candidates, return_num)
return {"results": final_res}
crawler_service = CrawlerService()
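# Minimal end-to-end sketch, assuming valid API keys in settings and a reachable
# database; the seed URL and the query below are placeholders, not real endpoints.
if __name__ == "__main__":
    mapped = crawler_service.map_site("https://example.com/docs")
    task_id = mapped["task_id"]
    # Drain the pending queue batch by batch
    while True:
        batch = crawler_service.process_queue_concurrent(task_id, batch_size=10)
        logger.info(crawler_service.get_task_status(task_id))
        if batch.get("msg") == "No pending urls":
            break
    # Query the indexed content
    print(crawler_service.search("how is logging configured?", task_id, return_num=5))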