from dataclasses import dataclass, asdict
import json, os, subprocess, uuid
from typing import Sequence
from langchain_core.language_models import LanguageModelLike
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import BaseTool, tool
from langchain_gigachat.chat_models import GigaChat
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import InMemorySaver
# Tools made available to the model
@tool
def generate_pdf_act(customer: Customer, jobs: list[Job]) -> None:
    """Generate a PDF act filled with the customer's data, bank requisites
    and the completed jobs.

    The act payload is written to ``typst/act.json`` and then rendered by
    the external ``typst`` CLI from ``typst/act.typ``.

    Args:
        customer (Customer): customer data to put into the act.
        jobs (list[Job]): completed jobs to list in the act.

    Returns:
        None
    """
    payload = {"customer": asdict(customer), "jobs": [asdict(j) for j in jobs]}
    print(f"generate_pdf_act({payload['customer']}, {payload['jobs']})")
    # ensure_ascii=False keeps Cyrillic text readable in the JSON; an explicit
    # UTF-8 encoding avoids locale-dependent failures (e.g. cp1251 on Windows).
    with open(os.path.join("typst", "act.json"), "w", encoding="utf-8") as f:
        json.dump(payload, f, ensure_ascii=False)
    command = ["typst", "compile", "--root", "./typst", "typst/act.typ"]
    try:
        subprocess.run(command, check=True, stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE, text=True)
    except subprocess.CalledProcessError as e:
        # Best-effort: surface the Typst compiler error to the console
        # instead of crashing the agent tool call.
        print(e.stderr)
class LLMAgent:
    """Thin wrapper around a LangGraph ReAct agent with in-memory history.

    Keeps a single conversation thread alive via an InMemorySaver
    checkpointer keyed by a random thread id.
    """

    def __init__(self, model: LanguageModelLike, tools: Sequence[BaseTool]):
        self._model = model
        self._agent = create_react_agent(model, tools=tools, checkpointer=InMemorySaver())
        # A fresh thread id so the checkpointer isolates this conversation.
        self._config: RunnableConfig = {"configurable": {"thread_id": uuid.uuid4().hex}}

    def upload_file(self, file):
        """Upload *file* through the underlying model and return its id."""
        uploaded = self._model.upload_file(file)
        return uploaded.id_

    def invoke(self, content: str, attachments: list[str]|None=None, temperature: float=0.1) -> str:
        """Send one user message to the agent and return the final reply text."""
        message: dict = {"role": "user", "content": content}
        if attachments:
            message["attachments"] = attachments
        state = {"messages": [message], "temperature": temperature}
        result = self._agent.invoke(state, config=self._config)
        return result["messages"][-1].content
def main():
    """Run the interactive act-generation agent loop.

    Uploads the requisites file to the model, primes the agent with the
    system prompt, then reads user prompts until EOF/interrupt.
    """
    system_prompt = (
        "something here"
    )
    # NOTE(review): `key` and REQUISITES_FILE are assumed to be defined
    # elsewhere in this file/module — confirm.
    model = GigaChat(model="GigaChat-2-Max", verify_ssl_certs=False, credentials=key)
    agent = LLMAgent(model, tools=[generate_pdf_act])  # tools exposed to the LLM
    # Context manager guarantees the requisites file handle is closed
    # (the original leaked it).
    with open(REQUISITES_FILE, "rb") as requisites:
        file_uploaded_id = agent.upload_file(requisites)
    agent_response = agent.invoke(content=system_prompt, attachments=[file_uploaded_id])
    print("LLM:", agent_response)
    while True:
        try:
            prompt = input("\nPrompt: ")
        except (EOFError, KeyboardInterrupt):
            break  # exit cleanly on Ctrl-D / Ctrl-C instead of a traceback
        llm_response = agent.invoke(prompt)
        print("LLM: ", llm_response)