# Ollama + LlamaIndex auto-merging RAG chat demo, served through Gradio.
# (The lines previously here were Hugging Face Spaces page-scrape residue —
# runtime status, file size, commit hash, and the line-number gutter — and
# were not valid Python.)
# Model tag served by the local Ollama instance; the 0.5B chat variant keeps
# the demo light enough for a free Colab/Spaces VM.
model_name = "qwen:0.5b-chat"
import os
# Bootstrap the VM: lshw is a dependency of the Ollama installer, which is
# then fetched and run from the official install script.
# NOTE(review): piping curl to sh executes remote code unchecked — acceptable
# only on a throwaway notebook VM.
os.system("sudo apt install lshw")
os.system("curl https://ollama.ai/install.sh | sh")
import nest_asyncio
# Allow re-entrant event loops — notebook kernels already run a loop, and the
# code below awaits coroutines on top of it.
nest_asyncio.apply()
import os
import asyncio
# Run Async Ollama
# Taken from: https://stackoverflow.com/questions/77697302/how-to-run-ollama-in-google-colab
# NB: You may need to set these, and get CUDA working, depending on which backend you are running.
# Set environment variable for NVIDIA library
# Set environment variables for CUDA
os.environ['PATH'] += ':/usr/local/cuda/bin'
# Set LD_LIBRARY_PATH to include both /usr/lib64-nvidia and CUDA lib directories
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64-nvidia:/usr/local/cuda/lib64'
async def run_process(cmd):
    """Run *cmd* as a subprocess and stream its stdout/stderr to the console.

    Parameters
    ----------
    cmd : list[str]
        Command and arguments, executed directly (no shell).

    Returns when the process has exited and both pipes are drained.
    """
    print('>>> starting', *cmd)
    process = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )

    # Echo each line of a stream as soon as it arrives.
    async def pipe(lines):
        async for line in lines:
            print(line.decode().strip())

    # Drain both streams concurrently; gather completes once the process
    # closes its pipes.  (The original code awaited a second gather over the
    # same, already-exhausted streams — a confusing no-op, now removed.)
    await asyncio.gather(
        pipe(process.stdout),
        pipe(process.stderr),
    )
    # Reap the child so it does not linger as a zombie.
    await process.wait()
import asyncio
import threading
async def start_ollama_serve():
    """Launch the Ollama server and stream its log output until it exits."""
    serve_cmd = ['ollama', 'serve']
    await run_process(serve_cmd)
def run_async_in_thread(loop, coro):
    """Drive *coro* to completion on *loop*, then close the loop.

    Intended as a ``threading.Thread`` target: it installs *loop* as the
    current thread's event loop, blocks until *coro* finishes, and closes
    the loop afterwards.
    """
    asyncio.set_event_loop(loop)
    loop.run_until_complete(coro)
    loop.close()
# Create a new event loop that will run in a new thread
new_loop = asyncio.new_event_loop()
# Start ollama serve in a separate thread so the cell won't block execution.
# The coroutine object is created here and driven to completion inside the
# worker thread by run_async_in_thread.
thread = threading.Thread(target=run_async_in_thread, args=(new_loop, start_ollama_serve()))
thread.start()
# Load up model: pull the weights into the local Ollama store (blocks until
# the download completes; the server started above must be up to accept it).
os.system(f"ollama pull {model_name}")
# Download Data: fetch the demo corpus from Google Drive into ./data.txt.
os.system("wget -O data.txt https://drive.google.com/uc?id=1uMvEYq17LsvTkX8bU5Fq-2FcG16XbrAW")
from llama_index import SimpleDirectoryReader
from llama_index import Document
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import StorageContext
from llama_index.retrievers import AutoMergingRetriever
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine import RetrieverQueryEngine
import gradio as gr
import os
from llama_index import get_response_synthesizer
from llama_index.chat_engine.condense_question import (
CondenseQuestionChatEngine,
)
from llama_index import set_global_service_context
def build_automerging_index(
    documents,
    llm,
    embed_model,
    save_dir="merging_index",
    chunk_sizes=None,
):
    """Build — or reload from disk — a hierarchical auto-merging vector index.

    Documents are split into a parent/child chunk hierarchy; only the leaf
    chunks are embedded into the vector index, while the full node tree lives
    in the docstore so an AutoMergingRetriever can merge retrieved leaves
    back into their parents at query time.

    Side effect: installs the built ServiceContext as the global default.
    """
    sizes = chunk_sizes or [2048, 512, 128]
    parser = HierarchicalNodeParser.from_defaults(chunk_sizes=sizes)
    all_nodes = parser.get_nodes_from_documents(documents)
    leaves = get_leaf_nodes(all_nodes)

    ctx = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
    )
    set_global_service_context(ctx)

    storage = StorageContext.from_defaults()
    storage.docstore.add_documents(all_nodes)

    if os.path.exists(save_dir):
        # A persisted index already exists — reload it instead of re-embedding.
        index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=ctx,
        )
    else:
        index = VectorStoreIndex(
            leaves, storage_context=storage, service_context=ctx
        )
        index.storage_context.persist(persist_dir=save_dir)
    return index
def get_automerging_query_engine(
    automerging_index,
    similarity_top_k=5,
    rerank_top_n=2,
):
    """Wrap *automerging_index* in a streaming query engine.

    Retrieval pipeline: vector retriever (top *similarity_top_k* leaves) ->
    auto-merging into parent chunks -> cross-encoder rerank down to
    *rerank_top_n* -> streaming response synthesis.
    """
    base = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
    merging_retriever = AutoMergingRetriever(
        base, automerging_index.storage_context, verbose=True
    )
    reranker = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    synthesizer = get_response_synthesizer(streaming=True)
    return RetrieverQueryEngine.from_args(
        merging_retriever,
        node_postprocessors=[reranker],
        response_synthesizer=synthesizer,
    )
# LLM client pointed at the local Ollama server; generous timeout because a
# small model on a CPU-only VM can be slow to respond.
llm = Ollama(model=model_name, request_timeout=300.0)
# Local embedding model used both to build the index and to embed queries.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
documents = SimpleDirectoryReader(
    input_files=["data.txt"]
).load_data()
automerging_index = build_automerging_index(
    documents,
    llm,
    embed_model=embed_model,
    save_dir="merging_index"
)
automerging_query_engine = get_automerging_query_engine(
    automerging_index,
)
# Chat wrapper: condenses each follow-up plus history into a standalone
# question before handing it to the query engine.
automerging_chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=automerging_query_engine,
)
def chat(message, history):
    """Gradio streaming callback: yield the progressively growing reply.

    *history* is required by the ChatInterface signature but unused — the
    chat engine keeps its own conversation state.
    """
    stream = automerging_chat_engine.stream_chat(message)
    partial = ""
    for token in stream.response_gen:
        partial += token
        yield partial
# Wire the streaming chat callback into a Gradio chat UI and serve it.
# (Removed the trailing " |" scrape artifact that made this line a syntax
# error in the original file.)
demo = gr.ChatInterface(chat)
demo.launch()