- Straight to the code, with comments
- If you'd like to try it out, I'm happy to discuss
- Results are still being validated.
### 1. Short Text Processing (<500 tokens)
```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('all-MiniLM-L6-v2')  # compact 384-dim model

def process_short(text):
    """Encode the full text directly."""
    return model.encode(text, convert_to_tensor=True)

# Example
short_text = "Basic concepts of natural language processing"  # roughly 15 tokens
vector = process_short(short_text)
```
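Since short texts are encoded whole, the resulting vector can be compared directly without any chunking or indexing. A minimal usage sketch (the candidate sentences below are made up for illustration):

```python
from sentence_transformers import util

candidates = [
    "Introduction to deep learning",
    "A beginner's guide to cooking pasta",
    "Word embeddings and tokenization basics",
]
cand_vectors = model.encode(candidates, convert_to_tensor=True)

# Cosine similarity between the query vector and each candidate
scores = util.cos_sim(vector, cand_vectors)[0]
print(candidates[int(scores.argmax())])
```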
### 2. Medium-Length Text Processing (500-2000 tokens)
```python
from langchain_text_splitters import RecursiveCharacterTextSplitter

def process_medium(text):
    """Overlapping chunking strategy."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=50,
        separators=["\n\n", "\n", "。", "!", "?"]
    )
    chunks = splitter.split_text(text)
    return [model.encode(chunk) for chunk in chunks]

# Example
medium_text = "History of machine learning ... (about 1500 Chinese characters)"  # roughly 1800 tokens
chunk_vectors = process_medium(medium_text)
```
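Retrieval then happens at chunk granularity. Below is a minimal sketch of scoring chunks against a query with cosine similarity; `top_chunk` is a hypothetical helper and assumes you kept the chunk texts alongside their vectors (e.g., by also returning `chunks` from `process_medium`):

```python
import numpy as np

def top_chunk(query, chunk_vectors, chunks):
    """Return the chunk whose embedding is most similar to the query."""
    q = model.encode(query)
    mat = np.array(chunk_vectors)
    # Cosine similarity between the query and every chunk vector
    sims = mat @ q / (np.linalg.norm(mat, axis=1) * np.linalg.norm(q) + 1e-9)
    return chunks[int(np.argmax(sims))]
```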
### 3. Long Text Processing (2000-20000 tokens)
```python
import spacy

def process_long(text):
    """Semantic chunking + summary enhancement."""
    # Load the Chinese pipeline for sentence boundary detection
    nlp = spacy.load("zh_core_web_sm")
    doc = nlp(text)
    # Split into sentence-level chunks
    chunks = [sent.text for sent in doc.sents]
    # Encode a summary vector for each chunk (first 200 characters)
    summary_model = SentenceTransformer('uer/sbert-base-chinese-nli')
    summaries = [summary_model.encode(chunk[:200]) for chunk in chunks]
    return chunks, summaries

# Example
long_text = "Artificial intelligence technology white paper ... (about 20,000 Chinese characters)"  # roughly 20000 tokens
text_chunks, summary_vecs = process_long(long_text)
```
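A simple extra step (not part of the original code) is to mean-pool the per-chunk summary vectors into a single document-level vector, which is handy for document-vs-document comparison, assuming `summary_vecs` is a list of equal-length vectors:

```python
import numpy as np

# Mean-pool the per-chunk summary vectors into one document vector
doc_vector = np.mean(np.array(summary_vecs), axis=0)
# L2-normalize so cosine similarity reduces to a dot product
doc_vector = doc_vector / (np.linalg.norm(doc_vector) + 1e-9)
```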
### 4. Very Long Text Processing (20000-200000 tokens)
```python
import faiss
import numpy as np

class HierarchicalIndex:
    def __init__(self):
        # Two-level index: summary layer for coarse filtering, chunk layer for precision.
        # Note: the summary model (sbert-base-chinese-nli) outputs 768-dim vectors,
        # while the chunk model (all-MiniLM-L6-v2) outputs 384-dim vectors.
        self.summary_model = SentenceTransformer('uer/sbert-base-chinese-nli')
        self.summary_index = faiss.IndexFlatL2(768)
        self.quantizer = faiss.IndexFlatL2(384)
        self.chunk_index = faiss.IndexIVFPQ(self.quantizer, 384, 100, 16, 8)
        self.metadata = []

    def add_document(self, text):
        # Chunk the document and generate a summary vector per chunk
        chunks, summaries = process_long(text)
        summary_vecs = np.array(summaries).astype('float32')
        chunk_vecs = np.array([model.encode(c) for c in chunks]).astype('float32')
        # IVF+PQ indexes must be trained (on at least nlist vectors) before add()
        if not self.chunk_index.is_trained:
            self.chunk_index.train(chunk_vecs)
        self.summary_index.add(summary_vecs)
        self.chunk_index.add(chunk_vecs)
        self.chunk_index.make_direct_map()  # allow reconstruct() on the IVF index
        self.metadata.extend(chunks)

    def search(self, query, k=5, coarse_k=10):
        # Coarse pass: retrieve candidate chunks through the summary layer
        sum_query = self.summary_model.encode(query).astype('float32').reshape(1, -1)
        _, sum_ids = self.summary_index.search(sum_query, coarse_k)
        candidates = [int(i) for i in sum_ids[0] if i != -1]
        # Fine pass: re-rank candidates by exact distance on the chunk vectors
        query_vec = model.encode(query).astype('float32')
        cand_vecs = np.array([self.chunk_index.reconstruct(i) for i in candidates])
        order = np.argsort(np.linalg.norm(cand_vecs - query_vec, axis=1))[:k]
        return [self.metadata[candidates[j]] for j in order]

# Usage example
hindex = HierarchicalIndex()
hindex.add_document("Technical documentation for some domain ... (about 150,000 Chinese characters)")  # roughly 200000 tokens
results = hindex.search("applications of deep learning in medical imaging")
```
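At this document size you usually want to persist the indexes rather than rebuild them on every run. A hedged sketch using FAISS serialization (the file names here are arbitrary):

```python
import faiss
import pickle

# Persist both index layers plus the chunk texts
faiss.write_index(hindex.summary_index, "summary.index")
faiss.write_index(hindex.chunk_index, "chunk.index")
with open("metadata.pkl", "wb") as f:
    pickle.dump(hindex.metadata, f)

# Later: reload without re-encoding the document
hindex2 = HierarchicalIndex()
hindex2.summary_index = faiss.read_index("summary.index")
hindex2.chunk_index = faiss.read_index("chunk.index")
with open("metadata.pkl", "rb") as f:
    hindex2.metadata = pickle.load(f)
```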
### 5. Massive-Scale Text Processing (>200000 tokens)
```python
import dask.dataframe as dd
from dask.distributed import Client

def process_extreme(file_path):
    """Distributed processing pipeline."""
    client = Client(n_workers=4)  # start a local Dask cluster
    # Read the data in partitions
    df = dd.read_parquet(file_path, chunksize=100000)
    # Encode in parallel; convert vectors to plain lists so they serialize to Parquet
    df['vector'] = df['text'].map_partitions(
        lambda s: s.apply(lambda t: model.encode(t).tolist()),
        meta=('vector', object)
    )
    # Write the encoded data out for downstream (distributed) index building
    df.to_parquet("encoded_data.parquet", engine="pyarrow")

# Example (processing ~1M texts)
process_extreme("massive_data.parquet")
```
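The distributed step above only writes encoded vectors out; turning them into a searchable index is a separate pass. A rough sketch, assuming the output Parquet has `text` and `vector` columns with 384-dim vectors from `all-MiniLM-L6-v2` (for truly massive data you would do this shard by shard rather than in one read):

```python
import faiss
import numpy as np
import pandas as pd

def build_index_from_parquet(path, dim=384):
    """Build a searchable FAISS index from the encoded Parquet output."""
    df = pd.read_parquet(path, columns=["text", "vector"])
    vecs = np.array(df["vector"].tolist(), dtype="float32")
    faiss.normalize_L2(vecs)          # normalize so inner product == cosine
    index = faiss.IndexFlatIP(dim)
    index.add(vecs)
    return index, df["text"].tolist()
```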
### Performance Optimization Comparison
| Text length (tokens) | Strategy | Index type | Latency | Memory |
| --- | --- | --- | --- | --- |
| <500 | Direct encoding | Flat index | <10 ms | 1 MB |
| ~2000 | Overlapping chunks | IVF+PQ | 50-100 ms | 50 MB |
| ~20000 | Semantic chunking + summary index | Two-level index | 200-500 ms | 300 MB |
| ~200000 | Hierarchical index | IVF + OPQ + product quantization | 1-2 s | 2 GB |
| >200000 | Distributed processing | Sharded index | 10 s+ | Cluster resources |
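For reference, the index types in the table map fairly directly onto FAISS factory strings; the sketch below uses illustrative, untuned parameters (nlist and M values are assumptions, not recommendations):

```python
import faiss

d = 384  # embedding dimension of all-MiniLM-L6-v2

flat_index = faiss.index_factory(d, "Flat")               # exact search, small corpora
ivfpq_index = faiss.index_factory(d, "IVF100,PQ16")       # coarse quantizer + product quantization
opq_index = faiss.index_factory(d, "OPQ16,IVF1024,PQ16")  # OPQ rotation before PQ for better recall

# IVF/PQ variants must be trained on a representative vector sample before add(),
# e.g. ivfpq_index.train(train_vecs) with train_vecs of shape (n, d), dtype float32.
```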
### Key Techniques
- Sliding window: `chunk_overlap` preserves context continuity across chunk boundaries
- Semantic chunking: sentence boundary detection with spaCy keeps chunks semantically coherent
- Hierarchical indexing: the summary layer speeds up coarse filtering while the chunk layer preserves precision
- Quantization compression: the PQ algorithm cuts memory usage at the cost of some accuracy (see the memory math sketched below)
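To make the quantization trade-off concrete, here is the back-of-the-envelope memory math for the 384-dim vectors used above (a sketch, not a measured benchmark):

```python
# Raw float32 storage: 384 dims * 4 bytes = 1536 bytes per vector
raw_bytes = 384 * 4

# PQ with 16 sub-quantizers at 8 bits each: 16 * 8 / 8 = 16 bytes per vector
pq_bytes = 16 * 8 // 8

print(raw_bytes / pq_bytes)  # ~96x compression, at some cost in recall
```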