from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

from src.helper import load_pdf, text_split, download_hugging_face_embeddings
DATA_PATH = r'G:\Chatbot\data'
DB_FAISS_PATH = r'G:\Chatbot\data\vector'
# Old one-off version, kept for reference:
'''
extracted_data = load_pdf(r"G:\Chatbot\data")
text_chunks = text_split(extracted_data)
embeddings = download_hugging_face_embeddings()

# Initialize the FAISS index
db = FAISS.from_documents(text_chunks, embeddings)
db.save_local(DB_FAISS_PATH)

# Changed the DB_FAISS_PATH above
# db.save_local(r"G:\Chatbot\DB_FAISS_PATH")
'''
# Build the FAISS vector store from the PDF files under DATA_PATH
def create_vector_db():
    extracted_data = load_pdf(DATA_PATH)                # load every PDF in the folder
    text_chunks = text_split(extracted_data)            # split pages into chunks
    embeddings = download_hugging_face_embeddings()     # embedding model from src.helper
    db = FAISS.from_documents(text_chunks, embeddings)  # embed the chunks and index them
    db.save_local(DB_FAISS_PATH)                        # persist the index to disk
    print("### db is created")
'''# Create vector database (earlier notebook version)
def create_vector_db():
    loader = DirectoryLoader(DATA_PATH,
                             glob='*.pdf',
                             loader_cls=PyPDFLoader)
    documents = loader.load()

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
                                                   chunk_overlap=50)
    texts = text_splitter.split_documents(documents)

    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                       model_kwargs={'device': 'cuda'})

    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)

create_vector_db()  # call the function directly in the cell
'''
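
# Sketch (an assumption, not part of the original script): how the saved index
# could be loaded back for retrieval. Recent langchain_community releases
# require allow_dangerous_deserialization=True because the FAISS docstore is
# pickled on disk; adjust the example query to your own documents.
#
# embeddings = download_hugging_face_embeddings()
# db = FAISS.load_local(DB_FAISS_PATH, embeddings,
#                       allow_dangerous_deserialization=True)
# docs = db.similarity_search("example query", k=3)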