完成节点
This commit is contained in:
51
nodes/chunk_and_embedding.json
Normal file
51
nodes/chunk_and_embedding.json
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"res_json": [
|
||||
{
|
||||
"data": {
|
||||
"markdown": "[Skip to main content](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme#content-area)\n\n[Dify Docs home page](https://docs.dify.ai/)\n\nLatest\n\n\nEnglish\n\nSearch...\n\nCtrl K\n\nSearch...\n\nNavigation\n\n1\\. Import Text Data\n\n1\\. Import Text Data\n\nClick on Knowledge in the main navigation bar of Dify. On this page, you can see your existing knowledge bases. Click **Create Knowledge** to enter the setup wizard. The Knowledge supports the import of the following two online data:Click **Knowledge** in the top navigation bar of the Dify, then select **Create Knowledge**. You can upload documents to the knowledge or importing online data to it.\n\n## [](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme\\#upload-local-files) Upload Local Files\n\nDrag and drop or select files to upload. The number of files allowed for **batch upload** depends on your [subscription plan](https://dify.ai/pricing).**Limitations for uploading documents:**\n\n- The upload size limit for a single document is 15MB;\n- Different [subscription plans](https://dify.ai/pricing) for the SaaS version limit **batch upload numbers, total document uploads, and vector storage**\n\n\n\n## [](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme\\#import-from-online-data-source) Import From Online Data Source\n\nWhen creating a **Knowledge**, you can import data from online sources. 
The knowledge supports the following two types of online data: [**1.1 Import Data from Notion** \\\\\n\\\\\nLearn how to import data from Notion](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/sync-from-notion) [**1.2 Sync from Website** \\\\\n\\\\\nLearn how to sync data from websites](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/sync-from-website)\n\nIf a knowledge base is set up to use online data, you won’t be able to add local documents later or switch it to a local file-based mode. This prevents a single knowledge base from mixing multiple data sources, avoiding management complications.\n\n## [](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme\\#adding-data-later) Adding Data Later\n\nIf you haven’t prepared your documents or other content yet, simply create an empty knowledge first. You can then upload local files or import online data whenever you’re ready.\n\nWas this page helpful?\n\nYesNo\n\n[Previous](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/introduction) [1.1 Sync Data from Notion\\\\\n\\\\\nNext](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/sync-from-notion)\n\nCtrl+I\n\nOn this page\n\n- [Upload Local Files](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme#upload-local-files)\n- [Import From Online Data Source](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme#import-from-online-data-source)\n- [Adding Data Later](https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme#adding-data-later)\n\nAssistant\n\nResponses are generated using AI and may contain mistakes.\n\n",
|
||||
"metadata": {
|
||||
"apple-mobile-web-app-title": "Dify Docs",
|
||||
"application-name": "Dify Docs",
|
||||
"cacheState": "hit",
|
||||
"cachedAt": "2025-12-09T08:12:32.803Z",
|
||||
"canonical": "https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme",
|
||||
"charset": "utf-8",
|
||||
"concurrencyLimited": true,
|
||||
"concurrencyQueueDurationMs": 371,
|
||||
"contentType": "text/html; charset=utf-8",
|
||||
"creditsUsed": 1,
|
||||
"favicon": "https://docs.dify.ai/mintlify-assets/_mintlify/favicons/dify-6c0370d8/tWYYD8GkT0MUJV0z/_generated/favicon/favicon-16x16.png",
|
||||
"generator": "Mintlify",
|
||||
"language": "en",
|
||||
"msapplication-TileColor": "#0060FF",
|
||||
"msapplication-config": "/mintlify-assets/_mintlify/favicons/dify-6c0370d8/tWYYD8GkT0MUJV0z/_generated/favicon/browserconfig.xml",
|
||||
"next-size-adjust": "",
|
||||
"og:image": "https://dify-6c0370d8.mintlify.app/mintlify-assets/_next/image?url=%2F_mintlify%2Fapi%2Fog%3Fdivision%3D1.%2BImport%2BText%2BData%26title%3D1.%2BImport%2BText%2BData%26logoLight%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fd05cfc6ebe48f725d171dc71c64a5d16.svg%26logoDark%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fc51f1cda47c1d9a4a162d7736f6e4c53.svg%26primaryColor%3D%25230060FF%26lightColor%3D%2523688FE8%26darkColor%3D%25230034FF%26backgroundLight%3D%2523ffffff%26backgroundDark%3D%25230b0c0f&w=1200&q=100",
|
||||
"og:image:height": "630",
|
||||
"og:image:width": "1200",
|
||||
"og:site_name": "Dify Docs",
|
||||
"og:title": "1. Import Text Data - Dify Docs",
|
||||
"og:type": "website",
|
||||
"og:url": "https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme",
|
||||
"ogImage": "https://dify-6c0370d8.mintlify.app/mintlify-assets/_next/image?url=%2F_mintlify%2Fapi%2Fog%3Fdivision%3D1.%2BImport%2BText%2BData%26title%3D1.%2BImport%2BText%2BData%26logoLight%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fd05cfc6ebe48f725d171dc71c64a5d16.svg%26logoDark%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fc51f1cda47c1d9a4a162d7736f6e4c53.svg%26primaryColor%3D%25230060FF%26lightColor%3D%2523688FE8%26darkColor%3D%25230034FF%26backgroundLight%3D%2523ffffff%26backgroundDark%3D%25230b0c0f&w=1200&q=100",
|
||||
"ogTitle": "1. Import Text Data - Dify Docs",
|
||||
"ogUrl": "https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme",
|
||||
"proxyUsed": "basic",
|
||||
"scrapeId": "019b024f-f76e-746b-b13c-6ca4884fdd64",
|
||||
"sourceURL": "https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme",
|
||||
"statusCode": 200,
|
||||
"title": "1. Import Text Data - Dify Docs",
|
||||
"twitter:card": "summary_large_image",
|
||||
"twitter:image": "https://dify-6c0370d8.mintlify.app/mintlify-assets/_next/image?url=%2F_mintlify%2Fapi%2Fog%3Fdivision%3D1.%2BImport%2BText%2BData%26title%3D1.%2BImport%2BText%2BData%26logoLight%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fd05cfc6ebe48f725d171dc71c64a5d16.svg%26logoDark%3Dhttps%253A%252F%252Fassets-docs.dify.ai%252F2025%252F05%252Fc51f1cda47c1d9a4a162d7736f6e4c53.svg%26primaryColor%3D%25230060FF%26lightColor%3D%2523688FE8%26darkColor%3D%25230034FF%26backgroundLight%3D%2523ffffff%26backgroundDark%3D%25230b0c0f&w=1200&q=100",
|
||||
"twitter:image:height": "630",
|
||||
"twitter:image:width": "1200",
|
||||
"twitter:title": "1. Import Text Data - Dify Docs",
|
||||
"url": "https://docs.dify.ai/en/use-dify/knowledge/create-knowledge/import-text-data/readme",
|
||||
"viewport": "width=device-width, initial-scale=1"
|
||||
},
|
||||
"warning": "This scrape job was throttled at your current concurrency limit. If you'd like to scrape faster, you can upgrade your plan."
|
||||
},
|
||||
"success": true
|
||||
}
|
||||
]
|
||||
}
|
||||
125
nodes/chunk_and_embedding.py
Normal file
125
nodes/chunk_and_embedding.py
Normal file
@@ -0,0 +1,125 @@
|
||||
import json
|
||||
import re
|
||||
import requests
|
||||
|
||||
def text_cleaning(text: str) -> str:
    """Normalize whitespace in *text*.

    Collapses every run of whitespace characters (spaces, tabs, newlines)
    into a single space, then trims leading and trailing whitespace.
    """
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed.strip()
|
||||
|
||||
def text_to_chunks(text: str):
|
||||
chunk_size = 800
|
||||
overlap = 100 # 100 字符重叠,意思是每块文本之间有100个字符的重叠
|
||||
step = chunk_size - overlap
|
||||
|
||||
chunks = []
|
||||
text_len = len(text)
|
||||
|
||||
if text_len < 50:
|
||||
chunks.append(text)
|
||||
else:
|
||||
start = 0
|
||||
while start < text_len:
|
||||
end = min(start + chunk_size, text_len)
|
||||
chunk_content = text[start:end]
|
||||
|
||||
# 防止切出过短的碎片,或者是最后一块
|
||||
if len(chunk_content) > 50 or start + step >= text_len:
|
||||
chunks.append(chunk_content)
|
||||
|
||||
start += step
|
||||
return chunks
|
||||
|
||||
|
||||
def chunks_embedding(texts: list[str], api_key: str) -> list[list[float]]:
    """Embed *texts* via Alibaba DashScope's text-embedding-v4 model.

    Produces one 1536-dimensional vector per input text, in input order.
    Best-effort: on any failure (HTTP error, unexpected payload, network
    problem) it returns a list of ``None`` placeholders of the same length
    as *texts* instead of raising.
    """
    if not texts:
        return []

    MODEL_NAME = "text-embedding-v4"
    url = "https://dashscope.aliyuncs.com/api/v1/services/embeddings/text-embedding/text-embedding"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }
    payload = {
        "model": MODEL_NAME,
        "input": {"texts": texts},
        "parameters": {"text_type": "document", "dimension": 1536}
    }

    try:
        response = requests.post(url, headers=headers, json=payload, timeout=60)
        response.raise_for_status()
        result = response.json()

        if "output" not in result or "embeddings" not in result["output"]:
            return [None] * len(texts)

        embedding_items = result["output"]["embeddings"]
        # The API may return items out of order; restore input order.
        embedding_items.sort(key=lambda item: item["text_index"])

        # Round every component to 8 decimal places to avoid downstream
        # errors from excessive float precision.
        return [
            [round(float(component), 8) for component in item["embedding"]]
            for item in embedding_items
        ]
    except Exception as e:
        print(f"Alibaba Embedding Error: {e}")
        return [None] * len(texts)
|
||||
|
||||
def main(scrape_json: list, DASHSCOPE_API_KEY: str) -> dict:
    """Turn one Firecrawl scrape result into embedded knowledge chunks.

    Args:
        scrape_json: Firecrawl output — a list whose first element is a
            dict with "success" and "data" keys ("data" holds "markdown"
            and "metadata").
        DASHSCOPE_API_KEY: Alibaba DashScope API key used for embedding.

    Returns:
        {"results": [...]} where each entry carries the source URL, title,
        chunk text, chunk index, and its embedding vector (None when the
        embedding call failed).
    """
    # --- 1. Parse the Firecrawl JSON (defensive access) ---
    scrape_obj = scrape_json[0]
    # .get() instead of [] so a malformed payload yields an empty result
    # rather than raising KeyError.
    if not scrape_obj.get("success"):
        return {"results": []}

    # Fix: the default must be a dict — the original used [], which would
    # crash on .get("markdown") whenever the "data" key was absent.
    data = scrape_obj.get("data", {})
    text = data.get("markdown", "")
    metadata = data.get("metadata", {})

    # --- 2. Generic markdown cleaning ---
    text = text_cleaning(text)

    # --- 3. Safe chunking: 800-char windows, 100-char overlap ---
    chunks = text_to_chunks(text)

    # --- 4. Vectorize via Alibaba DashScope ---
    vectors = []
    if chunks:
        vectors = chunks_embedding(chunks, DASHSCOPE_API_KEY)

    # Safety net: keep vectors and chunks aligned one-to-one even if the
    # embedding call returned an unexpected shape.
    if len(vectors) != len(chunks):
        vectors = [None] * len(chunks)

    # --- 5. Build rows for SQL insertion ---
    result_list = []
    for idx, content in enumerate(chunks):
        clean_content = content.strip()
        if not clean_content:
            continue

        result_list.append({
            "source_url": metadata.get("sourceURL", ""),
            "title": metadata.get("title", ""),
            "content": clean_content,
            "chunk_index": idx,
            "embedding": vectors[idx]
        })

    return {
        "results": result_list
    }
|
||||
22
nodes/parse_firecrawl_map.py
Normal file
22
nodes/parse_firecrawl_map.py
Normal file
@@ -0,0 +1,22 @@
|
||||
def main(map_json: list[dict]):
    """Flatten the Firecrawl Map node output into a clean payload.

    Expected input shape::

        [
            {
                "links": ["http://example.com/page1", "http://example.com/page2"],
                "success": true
            }
        ]

    Returns a dict with the discovered URLs and a numeric status code
    (1 on success, 0 otherwise). Kept as a single function because the
    logic is trivial and tightly coupled to the Firecrawl component.
    """
    first_result = map_json[0]

    return {
        "urls": first_result["links"],
        "code": int(first_result["success"]),
    }
|
||||
20
nodes/parse_pending_urls.py
Normal file
20
nodes/parse_pending_urls.py
Normal file
@@ -0,0 +1,20 @@
|
||||
def check_status(status_code: float, body: dict):
    """Validate the HTTP status code and the agreed response envelope.

    Fix: the *body* parameter was annotated ``str``, but every access
    below treats it as a dict (membership test and key lookup).

    Args:
        status_code: HTTP status code of the response (expected 200).
        body: Parsed JSON response body; must contain ``"code" == 1``.

    Raises:
        Exception: If the status code is not 200 or the body does not
            report success.
    """
    if status_code != 200:
        raise Exception(f"注册任务失败,状态码:{status_code}")
    if "code" not in body or body["code"] != 1:
        raise Exception(f"注册任务失败,返回值:{body}")
|
||||
|
||||
def main(status_code: float, body: dict):
    """Extract the pending URL list from a validated task response.

    Args:
        status_code: HTTP status code of the response.
        body: Parsed JSON body; expected shape
            ``{"code": 1, "data": {"urls": [...]}}``.

    Returns:
        {"urls": [...]} — the URLs still pending processing.

    Raises:
        Exception: Propagated from check_status on a bad response.
    """
    # Fix: the original wrapped this call in `try/except ... raise e`,
    # a no-op that only obscured the traceback — let it propagate.
    check_status(status_code, body)

    return {
        "urls": body["data"]["urls"]
    }
|
||||
23
nodes/parse_register.py
Normal file
23
nodes/parse_register.py
Normal file
@@ -0,0 +1,23 @@
|
||||
def check_status(status_code: float, body: dict):
    """Validate the HTTP status code and the agreed response envelope.

    Fix: the *body* parameter was annotated ``str``, but every access
    below treats it as a dict (membership test and key lookup).

    Args:
        status_code: HTTP status code of the response (expected 200).
        body: Parsed JSON response body; must contain ``"code" == 1``.

    Raises:
        Exception: If the status code is not 200 or the body does not
            report success.
    """
    if status_code != 200:
        raise Exception(f"注册任务失败,状态码:{status_code}")
    if "code" not in body or body["code"] != 1:
        raise Exception(f"注册任务失败,返回值:{body}")
|
||||
|
||||
|
||||
def main(status_code: float, body: dict):
    """Extract the task registration result from a validated response.

    Args:
        status_code: HTTP status code of the response.
        body: Parsed JSON body; expected shape
            ``{"code": 1, "data": {"task_id": ..., "is_new_task": ...}}``.

    Returns:
        {"task_id": ..., "is_new_task": ...} for the registered task.

    Raises:
        Exception: Propagated from check_status on a bad response.
    """
    # Fix: the original wrapped this call in `try/except ... raise e`,
    # a no-op that only obscured the traceback — let it propagate.
    check_status(status_code, body)

    return {
        "task_id": body["data"]["task_id"],
        "is_new_task": body["data"]["is_new_task"]
    }
|
||||
Reference in New Issue
Block a user