langserve
langserve copied to clipboard
How do I use create_react_agent in LangServe?
Server test code as follows:
llm_end_point_url = "http://172.16.21.155:8000/v1/"
model = ChatOpenAI(model="glm4v-9b",base_url=llm_end_point_url, api_key="api_key")
### embedding ###
embedding_model = HuggingFaceEmbeddings(model_name='/root/ljm/bge/bge-large-zh-v1.5')
### milvus ###
milvus_host = "***"
milvus_port = ***
collection_name = "***"
vector_store = Milvus(
embedding_function=embedding_model,
collection_name="langchain_lichi_txt",
connection_args={"host": milvus_host, "port": milvus_port, "db_name": "***"},
)
retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 3})
RAG = create_retriever_tool(
retriever=retriever,
name="***",
description="***",
)
tools = [RAG]
app = FastAPI(
title="GLM4 LangChain Server",
version="1.0",
description="A simple api server using Langchain's Runnable interfaces",
)
agent_executor = create_react_agent(model, tools, debug=True)
rag_chain = ConversationalRetrievalChain.from_llm(
llm=model,
retriever=retriever
)
add_routes(
app,
agent_executor,
path="/Litchi_RAG",
)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="172.16.21.155", port=8010)
Client test code as follows:
# FIX 1: the server registered the route at "/Litchi_RAG", not "/RAG" —
# the original URL would 404 (or hit the wrong runnable).
rag_chain = RemoteRunnable("http://172.16.21.155:8010/Litchi_RAG")

query = "********?"

# FIX 2: create_react_agent's graph expects the agent state dict
# {"messages": [...]}. Invoking with {"question": ...} leaves "messages"
# unset (None), which the server rejects with
# "NotImplementedError: Unsupported message type: <class 'NoneType'>".
response = rag_chain.invoke({"messages": [("user", query)]})
print(response)
Server error:
File "/root/anaconda3/envs/ljm_glm4_conda/lib/python3.11/site-packages/langchain_core/messages/utils.py", line 301, in _convert_to_message
raise NotImplementedError(f"Unsupported message type: {type(message)}")
NotImplementedError: Unsupported message type: <class 'NoneType'>