-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllama_integration.py
More file actions
48 lines (39 loc) · 1.67 KB
/
llama_integration.py
File metadata and controls
48 lines (39 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
"""
LlamaIndex integration for document retrieval.
"""
from typing import List
from llama_index.core import Document
from .openai import EmbeddingClient
from utils.logger import get_logger
logger = get_logger(__name__)
# Create a module-level embedding client instance
_embedding_client = EmbeddingClient()
def llama_index_retrieve_documents(query: str, database_path: str, top_k: int = 5,
search_func=None, get_chunk_func=None) -> List[Document]:
"""
Return llama_index.core.Document objects for the top_k matching chunks using sqlite-vector.
Args:
query: Search query text
database_path: Path to project database
top_k: Number of results to return
search_func: Function to search vectors (injected from analyzer)
get_chunk_func: Function to get chunk text (injected from analyzer)
Returns:
List of Document objects with chunk text and metadata
"""
if search_func is None or get_chunk_func is None:
raise ValueError("search_func and get_chunk_func must be provided")
q_emb = _embedding_client.embed_text(query, file_path="<query>", chunk_index=0)
if not q_emb:
return []
rows = search_func(database_path, q_emb, top_k=top_k)
docs: List[Document] = []
for r in rows:
fid = r.get("file_id")
path = r.get("path")
chunk_idx = r.get("chunk_index", 0)
score = r.get("score", 0.0)
chunk_text = get_chunk_func(database_path, fid, chunk_idx) or ""
doc = Document(text=chunk_text, extra_info={"path": path, "file_id": fid, "chunk_index": chunk_idx, "score": score})
docs.append(doc)
return docs