typeSessionSummarizerinterface{// ShouldSummarize checks if the session should be summarized.ShouldSummarize(sess*session.Session)bool// Summarize generates a summary without modifying the session events.Summarize(ctxcontext.Context,sess*session.Session)(string,error)// SetPrompt updates the summarizer's prompt dynamically.SetPrompt(promptstring)// SetModel updates the summarizer's model dynamically.SetModel(mmodel.Model)// Metadata returns metadata about the summarizer configuration.Metadata()map[string]any}
// AND 逻辑:所有条件都满足才触发summary.WithChecksAll(summary.CheckEventThreshold(10),summary.CheckTokenThreshold(2000),)// OR 逻辑:任一条件满足即触发summary.WithChecksAny(summary.CheckEventThreshold(50),summary.CheckTimeThreshold(10*time.Minute),)
摘要生成
选项
说明
WithMaxSummaryWords(maxWords int)
限制摘要的最大字数,包含在提示词中指导模型生成
WithPrompt(prompt string)
自定义摘要提示词,必须包含 {conversation_text} 占位符
WithSkipRecent(skipFunc SkipRecentFunc)
自定义函数跳过最近事件
Hook 选项
选项
说明
WithPreSummaryHook(h PreSummaryHook)
摘要前的 Hook,可修改输入文本
WithPostSummaryHook(h PostSummaryHook)
摘要后的 Hook,可修改输出摘要
WithSummaryHookAbortOnError(abort bool)
Hook 报错时是否中断,默认 false(忽略错误)
工具调用格式化
默认情况下,摘要器会将工具调用和工具结果包含在发送给 LLM 进行总结的对话文本中。默认格式为:
工具调用:[Called tool: toolName with args: {"arg": "value"}]
// Register before/after hooks around the summary model call.
callbacks := model.NewCallbacks().
	RegisterBeforeModel(func(ctx context.Context, args *model.BeforeModelArgs) (*model.BeforeModelResult, error) {
		// Modify args.Request, or return CustomResponse to skip the real model call.
		return nil, nil
	}).
	RegisterAfterModel(func(ctx context.Context, args *model.AfterModelArgs) (*model.AfterModelResult, error) {
		// Override model output via CustomResponse.
		return nil, nil
	})

summarizer := summary.NewSummarizer(
	summaryModel,
	summary.WithModelCallbacks(callbacks),
)
// Custom prompt; it must contain the {conversation_text} placeholder.
// NOTE(review): the original snippet's line breaks inside the raw string were
// lost in extraction; they are restored here at the evident sentence/tag
// boundaries — confirm against the canonical example.
customPrompt := `Analyze the following conversation and provide a concise summary,
focusing on key decisions, action items, and important context.
Keep it within {max_summary_words} words.
<conversation>
{conversation_text}
</conversation>
Summary:`

summarizer := summary.NewSummarizer(
	summaryModel,
	summary.WithPrompt(customPrompt),
	summary.WithMaxSummaryWords(100),
	summary.WithEventThreshold(15),
)
import("context""fmt""unicode/utf8""trpc.group/trpc-go/trpc-agent-go/model""trpc.group/trpc-go/trpc-agent-go/session/summary")// Use the built-in simple token countersummary.SetTokenCounter(model.NewSimpleTokenCounter())// Or use a custom implementationtypeMyCustomCounterstruct{}func(c*MyCustomCounter)CountTokens(ctxcontext.Context,messagemodel.Message)(int,error){_=ctxreturnutf8.RuneCountInString(message.Content),nil}func(c*MyCustomCounter)CountTokensRange(ctxcontext.Context,messages[]model.Message,start,endint)(int,error){ifstart<0||end>len(messages)||start>=end{return0,fmt.Errorf("invalid range: start=%d, end=%d, len=%d",start,end,len(messages))}total:=0fori:=start;i<end;i++{tokens,err:=c.CountTokens(ctx,messages[i])iferr!=nil{return0,err}total+=tokens}returntotal,nil}summary.SetTokenCounter(&MyCustomCounter{})
Here is a brief summary of your previous interactions:
<summary_of_previous_interactions>
[Summary content]
</summary_of_previous_interactions>
Note: this information is from previous interactions and may be outdated. You should ALWAYS prefer information from this conversation over the past summary.
packagemainimport("context""time""trpc.group/trpc-go/trpc-agent-go/agent/llmagent""trpc.group/trpc-go/trpc-agent-go/model""trpc.group/trpc-go/trpc-agent-go/model/openai""trpc.group/trpc-go/trpc-agent-go/runner""trpc.group/trpc-go/trpc-agent-go/session/inmemory""trpc.group/trpc-go/trpc-agent-go/session/summary")funcmain(){ctx:=context.Background()// Create LLM model for chat and summaryllm:=openai.New("gpt-4",openai.WithAPIKey("your-api-key"))// Create summarizer with flexible trigger conditionssummarizer:=summary.NewSummarizer(llm,summary.WithMaxSummaryWords(200),summary.WithChecksAny(summary.CheckEventThreshold(20),summary.CheckTokenThreshold(4000),summary.CheckTimeThreshold(5*time.Minute),),)// Create session service with summarizersessionService:=inmemory.NewSessionService(inmemory.WithSummarizer(summarizer),inmemory.WithAsyncSummaryNum(2),inmemory.WithSummaryQueueSize(100),inmemory.WithSummaryJobTimeout(60*time.Second),)// Create agent with summary injection enabledagent:=llmagent.New("my-agent",llmagent.WithModel(llm),llmagent.WithAddSessionSummary(true),llmagent.WithMaxHistoryRuns(10),)// Create runnerr:=runner.NewRunner("my-app",agent,runner.WithSessionService(sessionService))// Run conversation - summary will be managed automaticallyuserMsg:=model.NewUserMessage("Tell me about AI")eventChan,_:=r.Run(ctx,"user123","session456",userMsg)// Consume eventsforevent:=rangeeventChan{_=event}}