Compare commits
3 Commits
d5ee00d404
...
e1a94d4bc7
| Author | SHA1 | Date | |
|---|---|---|---|
| e1a94d4bc7 | |||
| 36bc0cc08b | |||
| e5ac2dde03 |
36
README.md
36
README.md
@@ -6,13 +6,39 @@
|
|||||||
|
|
||||||
完成wiki网页爬取和向量化与知识库查找
|
完成wiki网页爬取和向量化与知识库查找
|
||||||
|
|
||||||
|
|
||||||
## 当前状况
|
## 当前状况
|
||||||
|
|
||||||
1. 当前在我的电脑本地跑,没部署,看chenwei有没有空了教我往我们服务器上,我自己买的学生服务器还没来得及放上去,三月份到期
|
1. chunk分段逻辑:根据返回的markdown进行分割,按照#、##进行标题的分类,增加JSONB格式字段meta_info,有下面两个字段,分别可以用于数据库查询和LLM上下文认知资料来源
|
||||||
2. 这个demo后端只实现了功能没有auth相关的部分,后续可以直接迁移,chenwei那边gtco_ai开一个模块放进去
|
|
||||||
3. firecrawl的apikey,我自己的免费试用apikey快用完了,需要准备部署,调查付费
|
```python
|
||||||
4. 可演示,但是还没有包装到可以向客户汇报的层次,后续考虑直接用dify做一个工具包装,集成到Done的bot里;或者用chatflow直接包装,里面用节点请求部署好的后端进行知识库查询
|
# 源数据 (headers)
|
||||||
|
headers = {"h1": "产品介绍", "h2": "核心功能", "h3": "多语言支持"}
|
||||||
|
|
||||||
|
# 生成数据 (header_path)
|
||||||
|
# Python 代码逻辑: " > ".join(headers.values())
|
||||||
|
header_path = "产品介绍 > 核心功能 > 多语言支持"
|
||||||
|
```
|
||||||
|
2. 量化指标以及测试:目前存入的数据较少,测试结果可能偏差较大
|
||||||
|
|
||||||
|
```
|
||||||
|
"p_at_1": [], # Precision@1: 首位精确率
|
||||||
|
"hit_at_5": [], # HitRate@5: 前5命中率,即返回的前五个(目前设置只返回5个)是否符合问题
|
||||||
|
"mrr": [], # Mean Reciprocal Rank: 倒数排名分数,正确答案排得越靠前,分数越高
|
||||||
|
"latency": [] # 响应耗时
|
||||||
|
```
|
||||||
|
3. 搜索逻辑和问题分类:尚未实现,目前参考一些主流的做法,用户输入后先过一个LLM对问题进行拆分和分类,然后传入对应的知识库参数task_id进行对应的检索
|
||||||
|
4. RAG逻辑:混合检索,使用向量和关键词混合检索,此处进行粗筛,数据层返回后在业务层调用 gte-rerank 模型进行重排,最后返回请求
|
||||||
|
|
||||||
|
```python
|
||||||
|
vector_score = (1 - self.db.chunks.c.embedding.cosine_distance(query_vector))# 计算向量相似度
|
||||||
|
keyword_score = func.ts_rank(self.db.chunks.c.content_tsvector, keyword_query) # 计算关键词相似度
|
||||||
|
final_score = (vector_score * 0.7 + func.coalesce(keyword_score, 0) * 0.3).label("score")# 计算最终分数
|
||||||
|
```
|
||||||
|
5. 产品面向场景:客户需求爬取几个文档,并长期维护更新,后续需要新增,但是量相对不会太大,firecrawl付费大概不会太贵。爬虫获取完整wiki(可无视robots.txt),当前知识库存入和爬虫绑定强,依赖markdown格式存入
|
||||||
|
6. 后续开发:添加旧wiki的更新维护功能。dify增加对后端的封装,做一套搜索逻辑和问题分类的节点,如果不好弄那还是迁回到后端,后端只提供知识库的mcp,bot调用mcp之后,自行调用实现搜索和问题分类
|
||||||
|
|
||||||
|
对比其他检索方法的优势,做一套评测机制标准,评估最终LLM输出的准确度,目前是知识库检索准确度
|
||||||
|
|
||||||
|
|
||||||
切割逻辑,准确率定义,归结资料,测试设计,mcp服务调用,搜索逻辑,问题分类,流程架构设计,场景假设
|
切割逻辑,准确率定义,归结资料,测试设计,mcp服务调用,搜索逻辑,问题分类,流程架构设计,场景假设
|
||||||
|
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ class Settings(BaseSettings):
|
|||||||
系统配置类
|
系统配置类
|
||||||
自动读取环境变量或 .env 文件
|
自动读取环境变量或 .env 文件
|
||||||
"""
|
"""
|
||||||
|
CANDIDATE_NUM: int = 10
|
||||||
|
|
||||||
DB_USER: str
|
DB_USER: str
|
||||||
DB_PASS: str
|
DB_PASS: str
|
||||||
DB_HOST: str
|
DB_HOST: str
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ async def auto_process(req: AutoProcessRequest, bg_tasks: BackgroundTasks):
|
|||||||
@router.post("/search")
async def search_smart(req: TextSearchRequest):
    """V2 smart-search entry point.

    Accepts a plain-text query and delegates the whole pipeline
    (embedding -> hybrid retrieval -> rerank) to crawler_service.
    """
    try:
        payload = crawler_service.search(req.query, req.task_id, req.return_num)
        msg = payload.pop("msg", "Success")
        return make_response(1, msg, payload)
    except Exception as e:
        # Surface the failure reason to the caller instead of a bare 500.
        return make_response(0, str(e))
|
||||||
@@ -13,7 +13,6 @@ class AddUrlsRequest(BaseModel):
|
|||||||
task_id: int
|
task_id: int
|
||||||
urls_obj: dict
|
urls_obj: dict
|
||||||
|
|
||||||
# schemas.py
|
|
||||||
class CrawlResult(BaseModel):
|
class CrawlResult(BaseModel):
|
||||||
source_url: str
|
source_url: str
|
||||||
chunk_index: int # 新增字段
|
chunk_index: int # 新增字段
|
||||||
@@ -31,11 +30,6 @@ class SearchRequest(BaseModel):
|
|||||||
query_embedding: dict
|
query_embedding: dict
|
||||||
limit: Optional[int] = 5
|
limit: Optional[int] = 5
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# ... (保留原有的 Schema: RegisterRequest, AddUrlsRequest 等) ...
|
|
||||||
|
|
||||||
# === V2 New Schemas ===
|
# === V2 New Schemas ===
|
||||||
class AutoMapRequest(BaseModel):
|
class AutoMapRequest(BaseModel):
|
||||||
url: str
|
url: str
|
||||||
@@ -47,4 +41,4 @@ class AutoProcessRequest(BaseModel):
|
|||||||
class TextSearchRequest(BaseModel):
    """Request body for the V2 text-search endpoint."""

    query: str  # raw user query text; the server computes the embedding itself
    task_id: Optional[int] = None  # optional knowledge-base scope filter
    return_num: Optional[int] = 5  # number of final (reranked) results to return
|
||||||
@@ -142,9 +142,41 @@ class CrawlerService:
|
|||||||
|
|
||||||
return {"msg": "Batch processed", "count": processed}
|
return {"msg": "Batch processed", "count": processed}
|
||||||
|
|
||||||
def search(self, query: str, task_id, return_num: int):
    """
    Full-pipeline search: embedding -> hybrid retrieval (coarse) -> rerank (fine).

    Args:
        query: raw user query text.
        task_id: optional knowledge-base id used to scope retrieval.
        return_num: number of final results to return after reranking.

    Returns:
        dict with "results" (list of chunk dicts) and "msg" (status text).
    """
    # 1. Embed the query; bail out early if the embedding service fails.
    vector = llm_service.get_embedding(query)
    if not vector:
        return {"msg": "Embedding failed", "results": []}

    # 2. Coarse recall size: 10x the requested amount, but never below the
    #    configured floor (settings.CANDIDATE_NUM) so the reranker always
    #    has enough candidates to work with.
    #    NOTE: the old comment claimed "at least 50", but the actual floor
    #    is CANDIDATE_NUM (10 in the current Settings).
    coarse_limit = max(return_num * 10, settings.CANDIDATE_NUM)

    # 3. Hybrid retrieval (coarse ranking).
    coarse_results = data_service.search(
        query_text=query,
        query_vector=vector,
        task_id=task_id,
        candidates_num=coarse_limit,
    )

    candidates = coarse_results.get('results', [])
    if not candidates:
        return {"msg": "No documents found", "results": []}

    # 4. Rerank (fine ranking) and keep only the top `return_num`.
    final_results = llm_service.rerank(
        query=query,
        documents=candidates,
        top_n=return_num,
    )

    return {
        "results": final_results,
        "msg": f"Reranked {len(final_results)} from {len(candidates)} candidates"
    }
|
||||||
|
|
||||||
crawler_service = CrawlerService()
|
crawler_service = CrawlerService()
|
||||||
@@ -91,25 +91,21 @@ class DataService:
|
|||||||
|
|
||||||
return {"msg": f"Saved {count} chunks", "count": count}
|
return {"msg": f"Saved {count} chunks", "count": count}
|
||||||
|
|
||||||
def search(self, query_text: str, query_vector: list, task_id = None, limit: int = 5):
|
def search(self, query_text: str, query_vector: list, task_id=None, candidates_num: int = 5):
|
||||||
"""
|
"""
|
||||||
Phase 2: 混合检索 (Hybrid Search)
|
Phase 2: 混合检索 (Hybrid Search)
|
||||||
综合 向量相似度 (Semantic) 和 关键词匹配度 (Keyword)
|
|
||||||
"""
|
"""
|
||||||
|
# 向量格式清洗
|
||||||
|
if hasattr(query_vector, 'tolist'): query_vector = query_vector.tolist()
|
||||||
|
if query_vector and isinstance(query_vector, list) and len(query_vector) > 0:
|
||||||
|
if isinstance(query_vector[0], list): query_vector = query_vector[0]
|
||||||
|
|
||||||
results = []
|
results = []
|
||||||
with self.db.engine.connect() as conn:
|
with self.db.engine.connect() as conn:
|
||||||
# 定义混合检索的 SQL 逻辑
|
keyword_query = func.websearch_to_tsquery('english', query_text) # 转换为 tsquery
|
||||||
|
vector_score = (1 - self.db.chunks.c.embedding.cosine_distance(query_vector))# 计算向量相似度
|
||||||
# 使用 websearch_to_tsquery 处理用户输入 (支持 "firecrawl or dify" 这种语法)
|
keyword_score = func.ts_rank(self.db.chunks.c.content_tsvector, keyword_query) # 计算关键词相似度
|
||||||
keyword_query = func.websearch_to_tsquery('english', query_text)
|
final_score = (vector_score * 0.7 + func.coalesce(keyword_score, 0) * 0.3).label("score")# 计算最终分数
|
||||||
|
|
||||||
vector_score = (1 - self.db.chunks.c.embedding.cosine_distance(query_vector))
|
|
||||||
keyword_score = func.ts_rank(self.db.chunks.c.content_tsvector, keyword_query)
|
|
||||||
|
|
||||||
# 综合打分列: 0.7 * Vector + 0.3 * Keyword
|
|
||||||
# coalesce 确保如果关键词得分为 NULL (无匹配),则视为 0
|
|
||||||
final_score = (vector_score * 0.7 + func.coalesce(keyword_score, 0) * 0.3).label("score")
|
|
||||||
|
|
||||||
stmt = select(
|
stmt = select(
|
||||||
self.db.chunks.c.task_id,
|
self.db.chunks.c.task_id,
|
||||||
@@ -123,8 +119,8 @@ class DataService:
|
|||||||
if task_id:
|
if task_id:
|
||||||
stmt = stmt.where(self.db.chunks.c.task_id == task_id)
|
stmt = stmt.where(self.db.chunks.c.task_id == task_id)
|
||||||
|
|
||||||
# 按综合分数倒序
|
# 使用 candidates_num 控制召回数量
|
||||||
stmt = stmt.order_by(desc("score")).limit(limit)
|
stmt = stmt.order_by(desc("score")).limit(candidates_num)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
rows = conn.execute(stmt).fetchall()
|
rows = conn.execute(stmt).fetchall()
|
||||||
@@ -141,23 +137,19 @@ class DataService:
|
|||||||
]
|
]
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f"[ERROR] Hybrid search failed: {e}")
|
print(f"[ERROR] Hybrid search failed: {e}")
|
||||||
return self._fallback_vector_search(query_vector, task_id, limit)
|
return self._fallback_vector_search(query_vector, task_id, candidates_num)
|
||||||
|
|
||||||
return {"results": results, "msg": f"Hybrid found {len(results)}"}
|
return {"results": results, "msg": f"Hybrid found {len(results)}"}
|
||||||
|
|
||||||
def _fallback_vector_search(self, vector, task_id, limit):
|
def _fallback_vector_search(self, vector, task_id, limit):
|
||||||
"""降级兜底:纯向量搜索"""
|
|
||||||
print("[WARN] Fallback to pure vector search")
|
print("[WARN] Fallback to pure vector search")
|
||||||
with self.db.engine.connect() as conn:
|
with self.db.engine.connect() as conn:
|
||||||
stmt = select(
|
stmt = select(
|
||||||
self.db.chunks.c.task_id, self.db.chunks.c.source_url, self.db.chunks.c.title,
|
self.db.chunks.c.task_id, self.db.chunks.c.source_url, self.db.chunks.c.title,
|
||||||
self.db.chunks.c.content, self.db.chunks.c.meta_info
|
self.db.chunks.c.content, self.db.chunks.c.meta_info
|
||||||
).order_by(self.db.chunks.c.embedding.cosine_distance(vector)).limit(limit)
|
).order_by(self.db.chunks.c.embedding.cosine_distance(vector)).limit(limit)
|
||||||
|
|
||||||
if task_id:
|
if task_id:
|
||||||
stmt = stmt.where(self.db.chunks.c.task_id == task_id)
|
stmt = stmt.where(self.db.chunks.c.task_id == task_id)
|
||||||
|
|
||||||
rows = conn.execute(stmt).fetchall()
|
rows = conn.execute(stmt).fetchall()
|
||||||
return {"results": [{"content": r[3], "meta_info": r[4]} for r in rows], "msg": "Fallback found"}
|
return {"results": [{"content": r[3], "meta_info": r[4]} for r in rows], "msg": "Fallback found"}
|
||||||
|
|
||||||
data_service = DataService()
|
data_service = DataService()
|
||||||
@@ -5,16 +5,16 @@ from backend.core.config import settings
|
|||||||
class LLMService:
|
class LLMService:
|
||||||
"""
|
"""
|
||||||
LLM 服务封装层
|
LLM 服务封装层
|
||||||
负责与 DashScope 或其他模型供应商交互
|
负责与 DashScope (通义千问/GTE) 交互,包括 Embedding 和 Rerank
|
||||||
"""
|
"""
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
dashscope.api_key = settings.DASHSCOPE_API_KEY
|
dashscope.api_key = settings.DASHSCOPE_API_KEY
|
||||||
|
|
||||||
def get_embedding(self, text: str, dimension: int = 1536):
|
def get_embedding(self, text: str, dimension: int = 1536):
|
||||||
"""生成文本向量"""
|
"""生成文本向量 (Bi-Encoder)"""
|
||||||
try:
|
try:
|
||||||
resp = dashscope.TextEmbedding.call(
|
resp = dashscope.TextEmbedding.call(
|
||||||
model=dashscope.TextEmbedding.Models.text_embedding_v4,
|
model=dashscope.TextEmbedding.Models.text_embedding_v4, # 或 v4,视你的数据库维度而定
|
||||||
input=text,
|
input=text,
|
||||||
dimension=dimension
|
dimension=dimension
|
||||||
)
|
)
|
||||||
@@ -27,4 +27,67 @@ class LLMService:
|
|||||||
print(f"[ERROR] Embedding Exception: {e}")
|
print(f"[ERROR] Embedding Exception: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def rerank(self, query: str, documents: list, top_n: int = 5):
    """
    Cross-encoder reranking via DashScope gte-rerank.

    Args:
        query: user question.
        documents: coarse-recall chunks (List[dict]); each must carry 'content'.
        top_n: number of results to keep after reranking.

    Returns:
        List[dict]: documents sorted by rerank relevance and truncated to
        top_n, each annotated in place with 'score' and 'reranked'. On any
        API failure the first top_n coarse-ranked documents are returned
        unchanged (graceful degradation).
    """
    if not documents:
        return []

    # Cap the candidate set to avoid API timeouts/errors on huge batches.
    # Slice once, THEN extract the texts — the original sliced both lists
    # separately, duplicating the truncation logic.
    max_docs = 50
    documents = documents[:max_docs]
    # The rerank API takes plain text only; we keep the original dicts so
    # meta_info/ids survive, and map results back via the returned indices.
    doc_contents = [doc.get('content', '') for doc in documents]

    try:
        resp = dashscope.TextReRank.call(
            model='gte-rerank',
            query=query,
            documents=doc_contents,
            top_n=top_n,
            return_documents=False  # only indices/scores needed; we own the text
        )

        if resp.status_code == HTTPStatus.OK:
            # resp.output.results looks like:
            # [{'index': 2, 'relevance_score': 0.98}, {'index': 0, ...}]
            reranked_results = []
            for item in resp.output.results:
                # Map the API index back to the original document object.
                original_doc = documents[item.index]
                # Replace the coarse score with the rerank confidence (~0..1).
                original_doc['score'] = item.relevance_score
                # Provenance marker so debugging can tell reranked results apart.
                original_doc['reranked'] = True
                reranked_results.append(original_doc)
            return reranked_results

        print(f"[ERROR] Rerank API Error: {resp}")
        # Degrade gracefully: fall back to the coarse-ranked top N.
        return documents[:top_n]

    except Exception as e:
        print(f"[ERROR] Rerank Exception: {e}")
        # Degrade gracefully: fall back to the coarse-ranked top N.
        return documents[:top_n]
|
||||||
|
|
||||||
llm_service = LLMService()
|
llm_service = LLMService()
|
||||||
62
docs/开发计划.md
Normal file
62
docs/开发计划.md
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# 下一步开发计划
|
||||||
|
|
||||||
|
## 2025.1.13
|
||||||
|
|
||||||
|
1. 知识库RAG
|
||||||
|
测试相关资料参考链接: <https://zhuanlan.zhihu.com/p/1988251901502969000>
|
||||||
|
|
||||||
|
- [ ] 参照主流知识库架构增减修改当前知识库字段
|
||||||
|
- [ ] 根据主流RAG测试要求完善知识库检索测试
|
||||||
|
- [ ] 开发LLM输出测试
|
||||||
|
- [ ] 横向对比不同检索方法或模型下的测试效果
|
||||||
|
2. 后端封装backend
|
||||||
|
|
||||||
|
1. v2API全面增补,废弃v1API,修改data_service.py里为v1保留的旧接口。
|
||||||
|
预期实现效果:
|
||||||
|
- [ ] 添加任务
|
||||||
|
- [ ] 查询任务
|
||||||
|
- [ ] 执行任务
|
||||||
|
- [ ] 获取任务状态
|
||||||
|
- [ ] 获取任务结果
|
||||||
|
- [ ] 知识库搜索
|
||||||
|
2. 包装成MCP
|
||||||
|
|
||||||
|
3. dify节点
|
||||||
|
|
||||||
|
- [ ] 完成dify的LLM输出工具,主要负责处理搜索逻辑和问题分类,调用api,发布工具。
|
||||||
|
也可能直接在backend里全部实现,直接集成到bot里
|
||||||
|
|
||||||
|
4. firecrawl与替代方案调研
|
||||||
|
|
||||||
|
1. firecrawl付费方案
|
||||||
|
- 常规订阅链接: <https://www.firecrawl.dev/pricing>
|
||||||
|
注意: 此链接下均是按时间订阅的,每月限制额度, 可额外购买, 但是考虑到客户使用的时候可能会固定时间集中使用(**采集新wiki, 更新旧wiki**)
|
||||||
|
- 企业订阅方案: 需要联系firecrawl订制
|
||||||
|
2. firecrawl开源方案
|
||||||
|
- 开源github链接: <https://github.com/mendableai/firecrawl>
|
||||||
|
- 优劣对比
|
||||||
|
|
||||||
|
| 对比维度 | 开源版 (Self-hosted) | 云服务版 (Cloud / SaaS) | 核心差异说明 |
|
||||||
|
| :-------------------------- | :------------------------------------------------------------- | :-------------------------------------------------------------- | :------------------------------------------------------------------ |
|
||||||
|
| **部署方式** | 🐳 **Docker 自托管**<br>需自行配置服务器环境 | ☁️ **开箱即用**<br>注册 API Key 即可调用 | 云版省去了复杂的环境搭建过程。 |
|
||||||
|
| **成本** | 🆓 **软件免费**<br>需支付服务器/带宽费用 | 💰 **订阅制**<br>按 Credits (页数) 计费,有免费额度 | 量大且有闲置服务器时开源版更省钱;量小或追求稳定时云版更划算。 |
|
||||||
|
| **反爬虫绕过**<br>(Proxies) | ❌ **弱 / 需自行配置**<br>默认使用本机 IP,易被 Cloudflare 拦截 | ✅ **强 / 内置智能代理**<br>自动轮换 IP,擅长绕过 WAF 和人机验证 | **这是最大的区别。** 云版包含商业代理池成本,开源版需你自己买代理。 |
|
||||||
|
| **维护难度** | 🛠 **高**<br>需维护 Redis、队列、无头浏览器更新 | ☕ **零**<br>官方团队维护基础设施 | 开源版遇到浏览器崩溃或内存泄漏需自己修。 |
|
||||||
|
| **并发与性能** | ⚠️ **受限于硬件**<br>取决于你的服务器配置 | 🚀 **弹性扩容**<br>支持高并发,速度通常更快 | 云版对并行抓取做了优化。 |
|
||||||
|
| **JS 渲染** | ✅ **支持**<br>需配置 Playwright/Puppeteer | ✅ **支持**<br>默认优化,加载更稳定 | 两者核心引擎相同,但云版资源分配更合理。 |
|
||||||
|
| **数据隐私** | 🔒 **高 (本地化)**<br>数据不经过第三方服务器 | ☁️ **中**<br>数据需传输至 Firecrawl 服务器处理 | 对数据合规性要求极高的场景(如金融/医疗)首选开源版。 |
|
||||||
|
| **适用场景** | 极客折腾、内网抓取、低频低难度网站、数据极度敏感 | 商业项目、大规模抓取、高难度网站 (有反爬)、追求稳定性 | |
|
||||||
|
|
||||||
|
3. 自主研发爬虫
|
||||||
|
1. 反爬机制: 维基百科对IP有访问频率限制, 且有验证码, 需自行处理
|
||||||
|
2. 动态内容: 维基百科有很多动态内容, 如表格, 图片等, 需自行处理, 如使用Selenium等工具模拟浏览器行为
|
||||||
|
|
||||||
|
**Firecrawl方案和替代评估总结**
|
||||||
|
假设客户的产品需求是: 从不同的网站爬取文档制成知识库, 并且需要定期维护, 那么其实只有在爬取新的站点和维护旧的站点的时候会集中使用firecrawl的额度, 主要特点是**使用时间集中**且**使用时段内额度需求量很大**以及**优先要保证爬虫模块的稳定性**
|
||||||
|
因此最推荐的方案是: 定时采购额度, 但是考虑到常规的订阅只有按时间计费, 而客户的需求是**定期维护**, 而**按使用额度计费, 即企业协商订阅**的方案是最符合客户需求的.
|
||||||
|
|
||||||
|
| 类别 | 成本 | 困难 |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| 闭源版 | 购买定制服务, 如果企业长期使用的话成本可能几千? 按年也就一年左右的量够用了 | 用起来很顺手, 目前的接口返回值基本能满足开发需求 |
|
||||||
|
| 开源版 | 需要准备IP池之类的反爬机制, 需要为IP代理付费 | 配置和学习相关的运营维护 |
|
||||||
|
| 自主研发 | 除了研发的时间精力外, 也必需IP池的购买 | 高 |
|
||||||
192
scripts/evaluate_rag.py
Normal file
192
scripts/evaluate_rag.py
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import requests
|
||||||
|
import time
|
||||||
|
import numpy as np
|
||||||
|
from time import sleep
|
||||||
|
|
||||||
|
# 将项目根目录加入路径
|
||||||
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
from backend.core.config import settings
|
||||||
|
|
||||||
|
# ================= ⚙️ 配置区域 =================
|
||||||
|
BASE_URL = "http://127.0.0.1:8000"
|
||||||
|
TASK_ID = 19 # ⚠️ 请修改为你实际爬取数据的 Task ID
|
||||||
|
# 自动适配操作系统路径
|
||||||
|
TEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_dataset.json")
|
||||||
|
# ==============================================
|
||||||
|
|
||||||
|
class Colors:
    """ANSI escape sequences for colored terminal output."""

    HEADER = '\033[95m'   # bright magenta
    OKBLUE = '\033[94m'   # bright blue
    OKCYAN = '\033[96m'   # bright cyan
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'     # bright red
    ENDC = '\033[0m'      # reset all attributes
    BOLD = '\033[1m'      # bold text
|
||||||
|
|
||||||
|
def get_rag_results(query):
    """
    Call the V2 search API for `query` and measure wall-clock latency.

    Returns:
        (chunks, latency_ms): list of retrieved chunk dicts and the elapsed
        time in milliseconds. On HTTP error or exception returns ([], 0).
    """
    start_ts = time.time()
    try:
        # V2 endpoint: hybrid retrieval -> rerank happens server-side.
        res = requests.post(
            f"{BASE_URL}/api/v2/search",
            # BUGFIX: TextSearchRequest renamed `limit` -> `return_num`;
            # sending "limit" was silently ignored and the server default
            # was used instead. Request Top 5 explicitly.
            json={"query": query, "task_id": TASK_ID, "return_num": 5},
            timeout=15
        )
        latency = (time.time() - start_ts) * 1000  # ms

        if res.status_code != 200:
            print(f"{Colors.FAIL}❌ API Error {res.status_code}: {res.text}{Colors.ENDC}")
            return [], 0

        res_json = res.json()
        chunks = res_json.get('data', {}).get('results', [])
        return chunks, latency
    except Exception as e:
        print(f"{Colors.FAIL}❌ 请求异常: {e}{Colors.ENDC}")
        return [], 0
|
||||||
|
|
||||||
|
def check_hit(content, keywords):
    """
    Lightweight relevance check: does the chunk text contain any keyword?

    Serves as a cheap Ground-Truth proxy. An empty keyword list means the
    case is a refusal/open-ended question and is treated as always hit.
    """
    if not keywords:
        return True  # open-ended / refusal cases skip keyword matching
    if not content:
        return False

    haystack = content.lower()
    return any(kw.lower() in haystack for kw in keywords)
|
||||||
|
|
||||||
|
def run_evaluation():
    """Run the RAG evaluation loop over the JSON test set and print a report."""
    # --- 1. Load the test set ---
    if not os.path.exists(TEST_FILE):
        print(f"{Colors.FAIL}❌ 找不到测试文件: {TEST_FILE}{Colors.ENDC}")
        print("请确保 scripts/test_dataset.json 文件存在。")
        return

    with open(TEST_FILE, 'r', encoding='utf-8') as fp:
        dataset = json.load(fp)

    print(f"{Colors.HEADER}🚀 开始全维度量化评测 (Task ID: {TASK_ID}){Colors.ENDC}")
    print(f"📄 测试集包含 {len(dataset)} 个样本\n")

    # Metric accumulators — one entry appended per test case.
    metrics = {
        "p_at_1": [],    # Precision@1: correct answer ranked first
        "hit_at_5": [],  # HitRate@5: correct answer within the top 5
        "mrr": [],       # Mean Reciprocal Rank score
        "latency": []    # wall-clock latency (ms)
    }

    # --- 2. Evaluate every case ---
    for case_no, case in enumerate(dataset):
        query = case['query']
        print(f"📝 Case {case_no + 1}: {Colors.BOLD}{query}{Colors.ENDC}")

        chunks, latency = get_rag_results(query)
        metrics['latency'].append(latency)

        # Per-case scores; scan the returned chunks for the first keyword hit.
        p_at_1, is_hit_at_5 = 0, 0
        reciprocal_rank, hit_position, hit_chunk = 0.0, -1, None
        for rank, chunk in enumerate(chunks):
            if not check_hit(chunk['content'], case['keywords']):
                continue
            is_hit_at_5 = 1
            hit_position = rank
            reciprocal_rank = 1.0 / (rank + 1)
            hit_chunk = chunk
            if rank == 0:
                p_at_1 = 1
            break  # MRR only needs the position of the first correct answer

        metrics['p_at_1'].append(p_at_1)
        metrics['hit_at_5'].append(is_hit_at_5)
        metrics['mrr'].append(reciprocal_rank)

        if is_hit_at_5:
            rank_display = f"Rank {hit_position + 1}"
            color = Colors.OKGREEN if hit_position == 0 else Colors.OKCYAN
            source = hit_chunk.get('source_url', 'Unknown')

            # Cheap cross-lingual contamination heuristic on the source URL.
            warning = ""
            if "/es/" in source and "Spanish" not in query:
                warning = f"{Colors.WARNING}[跨语言风险]{Colors.ENDC}"
            elif "/zh/" in source and "如何" not in query and "什么" not in query:
                warning = f"{Colors.WARNING}[跨语言风险]{Colors.ENDC}"

            print(f"   {color}✅ 命中 ({rank_display}){Colors.ENDC} | MRR: {reciprocal_rank:.2f} | 耗时: {latency:.0f}ms {warning}")
        else:
            print(f"   {Colors.FAIL}❌ 未命中{Colors.ENDC} | 预期关键词: {case['keywords']}")

        sleep(0.1)  # throttle to stay under API rate limits

    # --- 3. Aggregate ---
    if len(dataset) == 0:
        return

    avg_p1 = np.mean(metrics['p_at_1']) * 100
    avg_hit5 = np.mean(metrics['hit_at_5']) * 100
    avg_mrr = np.mean(metrics['mrr'])
    avg_latency = np.mean(metrics['latency'])
    p95_latency = np.percentile(metrics['latency'], 95)

    print("\n" + "=" * 60)
    print(f"{Colors.HEADER}📊 最终量化评估报告 (Evaluation Report){Colors.ENDC}")
    print("=" * 60)

    # Precision@1 — the headline metric.
    print(f"🥇 {Colors.BOLD}Precision@1 (首位精确率): {avg_p1:.1f}%{Colors.ENDC}")
    print(f"   - 意义: 用户能否直接得到正确答案。引入 Rerank 后此项应显著提高。")

    # Hit Rate / Recall@5.
    print(f"🥈 Hit Rate@5 (前五召回率): {avg_hit5:.1f}%")
    print(f"   - 意义: 数据库是否真的包含答案。如果此项低,说明爬虫没爬全或混合检索漏了。")

    # MRR.
    print(f"🥉 MRR (平均倒数排名): {avg_mrr:.3f} / 1.0")

    # Latency.
    print(f"⚡ Avg Latency (平均耗时): {avg_latency:.0f} ms")
    print(f"⚡ P95 Latency (95%分位): {p95_latency:.0f} ms")
    print("=" * 60)

    # --- 4. Heuristic diagnosis ---
    print(f"{Colors.HEADER}💡 诊断建议:{Colors.ENDC}")

    if avg_p1 < avg_hit5:
        gap = avg_hit5 - avg_p1
        print(f"  • {Colors.WARNING}排序优化空间大{Colors.ENDC}: 召回了但没排第一的情况占 {gap:.1f}%。")
        print("    -> 你的 Rerank 模型生效了吗?或者 Rerank 的 Top N 截断是否太早?")
    elif avg_p1 > 80:
        print(f"  • {Colors.OKGREEN}排序效果优秀{Colors.ENDC}: 绝大多数正确答案都排在第一位。")

    if avg_hit5 < 50:
        print(f"  • {Colors.FAIL}召回率过低{Colors.ENDC}: 可能是测试集关键词太生僻,或者 TS_RANK 权重过低。")

    if avg_latency > 2000:
        print(f"  • {Colors.WARNING}系统响应慢{Colors.ENDC}: 2秒以上。检查是否因为 Rerank 文档过多(建议 <= 50个)。")


if __name__ == "__main__":
    run_evaluation()
|
||||||
86
scripts/test_dataset.json
Normal file
86
scripts/test_dataset.json
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"type": "core_function",
|
||||||
|
"query": "What is the difference between /scrape and /map endpoints?",
|
||||||
|
"ground_truth": "/map is used to crawl a website and retrieve all URLs, while /scrape is used to extract content from a specific URL.",
|
||||||
|
"keywords": ["URL", "content", "specific", "retrieve"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 2,
|
||||||
|
"type": "new_feature",
|
||||||
|
"query": "What is the Deep Research feature?",
|
||||||
|
"ground_truth": "Deep Research is an alpha feature allowing agents to perform iterative research tasks.",
|
||||||
|
"keywords": ["alpha", "iterative", "research", "agent"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 3,
|
||||||
|
"type": "integration",
|
||||||
|
"query": "How can I integrate Firecrawl with ChatGPT?",
|
||||||
|
"ground_truth": "Firecrawl can be integrated via MCP (Model Context Protocol).",
|
||||||
|
"keywords": ["MCP", "Model Context Protocol", "setup"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 4,
|
||||||
|
"type": "multilingual_zh",
|
||||||
|
"query": "如何进行私有化部署 (Self-host)?",
|
||||||
|
"ground_truth": "你需要使用 Docker Compose 进行部署,文档位于 /self-host/quick-start/docker-compose。",
|
||||||
|
"keywords": ["Docker", "Compose", "self-host", "deploy"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 5,
|
||||||
|
"type": "api_detail",
|
||||||
|
"query": "What parameters are available for the /extract endpoint?",
|
||||||
|
"ground_truth": "The extract endpoint allows defining a schema for structured data extraction.",
|
||||||
|
"keywords": ["schema", "structured", "prompt"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 6,
|
||||||
|
"type": "numeric",
|
||||||
|
"query": "How do credits work for the scrape endpoint?",
|
||||||
|
"ground_truth": "Specific credit usage details are in the /credits endpoint documentation (usually 1 credit per page for basic scrape).",
|
||||||
|
"keywords": ["credit", "usage", "cost"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 7,
|
||||||
|
"type": "negative_test",
|
||||||
|
"query": "Does Firecrawl support scraping video content from YouTube?",
|
||||||
|
"ground_truth": "The documentation does not mention video scraping support.",
|
||||||
|
"keywords": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 8,
|
||||||
|
"type": "advanced",
|
||||||
|
"query": "How to use batch scrape?",
|
||||||
|
"ground_truth": "Use the /batch/scrape endpoint to submit multiple URLs at once.",
|
||||||
|
"keywords": ["batch", "multiple", "URLs"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 9,
|
||||||
|
"type": "automation",
|
||||||
|
"query": "Is there an n8n integration guide?",
|
||||||
|
"ground_truth": "Yes, there is a workflow automation guide for n8n.",
|
||||||
|
"keywords": ["n8n", "workflow", "automation"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 10,
|
||||||
|
"type": "security",
|
||||||
|
"query": "Where can I find information about webhook security?",
|
||||||
|
"ground_truth": "Information is available in the Webhooks Security section.",
|
||||||
|
"keywords": ["webhook", "security", "signature"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 11,
|
||||||
|
"type": "cross_lingual_trap",
|
||||||
|
"query": "Explain the crawl features in French.",
|
||||||
|
"ground_truth": "The system should ideally retrieve the French document (/fr/features/crawl) and answer in French.",
|
||||||
|
"keywords": ["fonctionnalités", "crawl", "fr"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": 12,
|
||||||
|
"type": "api_history",
|
||||||
|
"query": "How to check historical token usage?",
|
||||||
|
"ground_truth": "Use the /token-usage-historical endpoint.",
|
||||||
|
"keywords": ["token", "usage", "historical"]
|
||||||
|
}
|
||||||
|
]
|
||||||
Reference in New Issue
Block a user