728x90
## chat_router.py
from domain.ai.service import chat
class Message(BaseModel):
    """Request body for the chat endpoints."""

    # Speaker role (e.g. "user"); optional, defaults to None.
    role: Union[str, None] = None
    # The chat message text.
    content: str
    # Key identifying the conversation whose history should be used.
    session_id: str
@router.post("lang-chain")
def chat_request_by_lang_chain(message: Message):
return chat.chat_completion_by_lang_chain(session_id = message.session_id, message=message.content)
## chat.py
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.schema.runnable.passthrough import RunnablePassthrough
from langchain_core.runnables import RunnableLambda
from operator import itemgetter
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents import AgentExecutor
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory
)
def chat_completion_by_lang_chain(session_id, message):
    """Run one chat turn through an OpenAI-functions agent with per-session history.

    Args:
        session_id: Key identifying the conversation whose history to use.
        message: The user's message text.

    Returns:
        dict with 'content' (the agent's final answer) and 'role' ('ai').
    """
    # The prompt is only a template for the model input: it tells the model
    # how to behave, it does not contain any answer itself.
    # BUG FIX: the constructor is `from_messages` (plural), not `from_message`.
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are very powerful assistant"),
            ("user", "{input}"),
            # Filled by the agent with its intermediate tool-call steps.
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    # Expose the tools to the model as OpenAI function-call schemas.
    # NOTE(review): `llm` and `tools` are assumed to be module-level globals
    # defined outside this excerpt — confirm.
    chat_model_with_tools = llm.bind(
        functions=[format_tool_to_openai_function(t) for t in tools]
    )
    # The agent is a runnable pipeline; a response is only produced when an
    # execution method such as agent.invoke() or AgentExecutor is called.
    # BUG FIX: the scratchpad key is 'intermediate_steps'
    # (was misspelled 'intermediage_steps', yielding an empty scratchpad).
    agent = (
        RunnablePassthrough.assign(
            agent_scratchpad=RunnableLambda(itemgetter("intermediate_steps"))
            | format_to_openai_functions
        )
        | prompt
        | chat_model_with_tools
        | OpenAIFunctionsAgentOutputParser()
    )
    # BUG FIX: this was assigned to `agent_excutor` but referenced below as
    # `agent_executor`, raising NameError at runtime.
    agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
    # Wrap the executor so conversation context is preserved across turns
    # (this re-runs the invoke machinery once more around the executor).
    with_message_history = RunnableWithMessageHistory(
        agent_executor,
        get_session_history,
        input_messages_key="input",
    )
    output = with_message_history.invoke(
        {"input": message},
        config={"configurable": {"session_id": session_id}},
    )
    return {
        'content': output['output'],
        'role': 'ai',  # response generated by the AI
    }
# Process-local map of session_id -> chat history; lives only as long as
# this process, so histories are lost on restart.
store = {}


def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*, creating a fresh one on first use."""
    print('get_session_history : ' + session_id)
    history = store.get(session_id)
    if history is None:
        history = InMemoryChatMessageHistory()
        store[session_id] = history
    return history
'(ktds) 24.08.05 ~ > 생성형AI' 카테고리의 다른 글
[생성형AI] conflunence 데이터를 바탕으로 chatgpt 직접 구현해보기 (1편) (3) | 2024.12.27 |
---|