基于图对话案例baseChatNode
from typing import TypedDict, Annotated
from langgraph.graph import StateGraph
from langgraph.graph.message import add_messages
class State(TypedDict):
    # Conversation state shared by all graph nodes.
    # `add_messages` makes updates append to (not replace) the list.
    messages: Annotated[list, add_messages]
def chatbot_node(state: "State") -> dict:
    """Graph node that emits one hard-coded multimodal user message.

    Returns a partial state update: the ``messages`` key holds a single
    user-role message whose content is a text part plus a base64 image
    part (placeholder data ``"1234"``, not a real image).

    Note: the incoming ``state`` is ignored — this node always returns
    the same message.
    """
    # Fixed: the original annotated the return as `list`, but the node
    # actually returns a dict (the state-update mapping LangGraph expects).
    return {
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Here's an image:",
                        # Anthropic-style prompt-caching marker on the text part.
                        "cache_control": {"type": "ephemeral"},
                    },
                    {
                        "type": "image",
                        "source": {
                            "type": "base64",
                            "media_type": "image/jpeg",
                            "data": "1234",  # placeholder payload
                        },
                    },
                ],
            },
        ],
    }
# Build a single-node graph: "chatbot" is both the entry and the finish
# point, so each invocation runs exactly one node.
builder = StateGraph(State)
builder.add_node("chatbot", chatbot_node)
builder.set_entry_point("chatbot")
builder.set_finish_point("chatbot")
graph = builder.compile()
def stream_graph_updates(user_input: str) -> None:
    """Stream one user turn through the graph and print each update.

    Sends *user_input* as a user message, prints every streamed event,
    and for each node's state update prints the latest message content.
    """
    for event in graph.stream({"messages": [{"role": "user", "content": user_input}]}):
        print("Event:", event)
        # Iterate values directly instead of indexing by key.
        for value in event.values():
            print("Value:", value)
            # `add_messages` stores LangChain message objects, which expose
            # their text via the `.content` attribute — subscripting with
            # ["content"] (as the original did) raises at runtime.
            print("Assistant:", value["messages"][-1].content)
# Simple REPL: read user turns until quit/exit/q, streaming each turn
# through the graph.
while True:
    try:
        user_input = input("User: ")
    except (EOFError, KeyboardInterrupt):
        # Fallback when stdin is unavailable (e.g. a non-interactive run):
        # ask one canned question, then stop.
        # The original used a bare `except:` around the whole body, which
        # also swallowed real errors raised by stream_graph_updates; the
        # try now covers only input() and catches the specific exceptions.
        user_input = "What do you know about LangGraph?"
        print("User: " + user_input)
        stream_graph_updates(user_input)
        break
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    stream_graph_updates(user_input)
如何格式化输出内容
继承BaseModel定义结构
class QuestionTemp(BaseModel):
    """Structured-output schema for one generated exam question."""

    # Question stem (problem statement). Field descriptions are kept in
    # Chinese: they are runtime strings emitted into the JSON schema the
    # model sees.
    stem: str = Field(
        description="题目题干"
    )
    # Answer options.
    options: List[str] = Field(
        description="题目选项"
    )
    # Step-by-step solution.
    solve: List[str] = Field(
        description="解答"
    )
完整代码
from typing import Annotated, List, Literal

from typing_extensions import TypedDict

from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
# Fixed: `Field` was imported twice (a second `from pydantic import Field`
# duplicated this one); imports are now deduplicated and grouped.
from pydantic import BaseModel, Field

# Local LM Studio endpoint; the "API key" is a placeholder because the
# local server does not authenticate.
OPENAI_API_KEY = "lm-studio"
OPENAI_MODEL_NAME = "qwen2.5-3b-instruct"
OPENAI_BASE_URL = "http://localhost:9999/v1"

llm = ChatOpenAI(
    openai_api_key=OPENAI_API_KEY,
    model_name=OPENAI_MODEL_NAME,
    openai_api_base=OPENAI_BASE_URL,
    temperature=1,
)
class QuestionOption(BaseModel):
    """One labeled multiple-choice option (label + body text)."""

    # NOTE(review): this model is never referenced — QuestionTemp.options
    # is List[str], not List[QuestionOption]. Possibly leftover; confirm.

    # Option label, e.g. "A"/"B"/"C"/"D".
    option: str = Field(
        description="选项[A,B,C,D]"
    )
    # Option body text.
    content: str = Field(
        description="选项内容"
    )
class QuestionTemp(BaseModel):
    """Structured-output schema for one generated exam question.

    Field descriptions stay in Chinese: they are runtime strings that are
    emitted into the JSON schema the model is asked to follow.
    """

    # Question stem (problem statement).
    stem: str = Field(
        description="题目题干"
    )
    # Answer options.
    options: List[str] = Field(
        description="题目选项"
    )
    # Step-by-step solution.
    solve: List[str] = Field(
        description="解答"
    )
# Show the JSON schema derived from QuestionTemp (what the model must match).
print(QuestionTemp.model_json_schema())

# Bind the schema to the LLM so responses are parsed into QuestionTemp.
evaluator = llm.with_structured_output(QuestionTemp)

# System + user prompts (Chinese): "you are a calculus teacher; generate
# questions with stem, options and solution, formatted output."
messages = [
    SystemMessage(
        content="你是一个高等数学老师,你需要根据用户的问题,生成题目,题目需要包含题干,选项,解答,格式化输出。"
    ),
    HumanMessage(
        content="请生成10道2023年的高等数学题目,题目需要包含题干,选项,解答,格式化输出。"
    )
]

# Result is a QuestionTemp instance, not raw text.
contents = evaluator.invoke(messages)
print("文档",contents)
输出
{'properties': {'stem': {'description': '题目题干', 'title': 'Stem', 'type': 'string'}, 'options': {'description': '题目选项', 'items': {'type': 'string'}, 'title': 'Options', 'type': 'array'}, 'solve': {'description': '解答', 'items': {'type': 'string'}, 'title': 'Solve', 'type': 'array'}}, 'required': ['stem', 'options', 'solve'], 'title': 'QuestionTemp', 'type': 'object'}
——————————————————————————————————————————————————————————————————————————————————————
文档 stem='若\frac{π}{4}≤θ<\frac{5π}{6},则函数f(θ)=\frac{1+sinθ-cos^2θ}{cos^2θ-1}-3tanθ的值域是?' options=['[-3, 1]', '[1, +∞)', '(-4, -1)', '[-4, -1]'] solve=['f(θ)=\frac{1+sinθ-cos^2θ}{cos^2θ-1}-3tanθ=\frac{sinθ-(1-sin^{2}θ)}{cos^2θ-1}-3\times \frac {sinθ}{cosθ}$\n\n若取t=tanθ,则有$f(θ)=-3t$。\n又由于$\frac{π}{4}≤θ<\frac{5π}{6},则-2<-tanθ≤-\frac{\root {}\thinspace {} \brack {1}}{3}$\n故:$-6<-3\times tanθ≤3$。', '答案是:[-4, -1]']