| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155 |
- package main
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/sashabaranov/go-openai"
)
// LLM is an OpenAI LLM wrapper with tool-calling support.
// Configure it once via NewLLM; the same settings are applied to every request.
type LLM struct {
	client      *openai.Client // underlying OpenAI API client (may point at a custom BaseURL)
	model       string         // model identifier sent with every request
	temperature float32        // sampling temperature applied to chat requests
	maxTokens   int            // per-request cap on completion tokens
}
- // NewLLM creates a new LLM instance
- func NewLLM(apiKey, model string, temperature float32, baseURL string, maxTokens int) *LLM {
- config := openai.DefaultConfig(apiKey)
- if baseURL != "" {
- config.BaseURL = baseURL
- }
- return &LLM{
- client: openai.NewClientWithConfig(config),
- model: model,
- temperature: temperature,
- maxTokens: maxTokens,
- }
- }
// ChatCompletionRequest is a request for chat completion.
// NOTE(review): not referenced by the code visible here (Chat takes the
// slices directly) — possibly used by callers elsewhere; verify before removing.
type ChatCompletionRequest struct {
	Messages []openai.ChatCompletionMessage // conversation history to send
	Tools    []openai.Tool                  // tool definitions the model may call
}
// ChatCompletionResponse is a response from chat completion.
// NOTE(review): not referenced by the code visible here — possibly used by
// callers elsewhere; verify before removing.
type ChatCompletionResponse struct {
	Message openai.ChatCompletionMessage // the assistant message returned by the model
}
- // Chat sends a chat completion request
- func (l *LLM) Chat(ctx context.Context, messages []openai.ChatCompletionMessage, tools []openai.Tool) (*openai.ChatCompletionMessage, error) {
- req := openai.ChatCompletionRequest{
- Model: l.model,
- Messages: messages,
- Temperature: l.temperature,
- MaxTokens: l.maxTokens,
- }
- if len(tools) > 0 {
- req.Tools = tools
- }
- resp, err := l.client.CreateChatCompletion(ctx, req)
- if err != nil {
- return nil, fmt.Errorf("failed to create chat completion: %w", err)
- }
- if len(resp.Choices) == 0 {
- return nil, fmt.Errorf("no response choices returned")
- }
- // Log warning if finish reason indicates an issue
- choice := resp.Choices[0]
- if choice.FinishReason == "length" {
- // Model hit token limit - may have incomplete response
- // This is common with reasoning models that need more tokens
- return nil, fmt.Errorf("response truncated: model hit token limit (finish_reason: length). Consider increasing OPENAI_MAX_TOKENS (current: %d). Usage: prompt=%d, completion=%d, total=%d",
- l.maxTokens, resp.Usage.PromptTokens, resp.Usage.CompletionTokens, resp.Usage.TotalTokens)
- }
- return &choice.Message, nil
- }
- // ConvertMCPToolsToOpenAI converts MCP tools to OpenAI tool format
- func ConvertMCPToolsToOpenAI(mcpTools []Tool) []openai.Tool {
- tools := make([]openai.Tool, len(mcpTools))
- for i, t := range mcpTools {
- // Convert InputSchema to JSON schema format using map[string]interface{}
- props := make(map[string]interface{})
- for name, prop := range t.InputSchema.Properties {
- propMap := map[string]interface{}{
- "type": prop.Type,
- "description": prop.Description,
- }
- // For object types without explicit nested properties,
- // allow additionalProperties so the LLM can pass any key-value pairs
- // This is important for tools like 'query' and 'mutate' that accept
- // arbitrary variables objects
- if prop.Type == "object" {
- propMap["additionalProperties"] = true
- }
- props[name] = propMap
- }
- // Build parameters map, omitting empty required array
- params := map[string]interface{}{
- "type": t.InputSchema.Type,
- "properties": props,
- }
- // Only include required if it has elements - empty slice marshals as null
- if len(t.InputSchema.Required) > 0 {
- params["required"] = t.InputSchema.Required
- }
- tools[i] = openai.Tool{
- Type: openai.ToolTypeFunction,
- Function: &openai.FunctionDefinition{
- Name: t.Name,
- Description: t.Description,
- Parameters: params,
- },
- }
- }
- return tools
- }
- // ParseToolCall parses a tool call from the LLM response
- func ParseToolCall(toolCall openai.ToolCall) (string, map[string]interface{}, error) {
- name := toolCall.Function.Name
- var args map[string]interface{}
- if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &args); err != nil {
- return name, nil, fmt.Errorf("failed to parse tool arguments: %w", err)
- }
- return name, args, nil
- }
- // TestConnection tests the connection to OpenAI API
- func (l *LLM) TestConnection(ctx context.Context) error {
- // Simple test request - use enough tokens for reasoning models
- // Reasoning models need more tokens for their thinking process
- req := openai.ChatCompletionRequest{
- Model: l.model,
- Messages: []openai.ChatCompletionMessage{
- {
- Role: openai.ChatMessageRoleUser,
- Content: "Hello",
- },
- },
- MaxTokens: 100,
- }
- _, err := l.client.CreateChatCompletion(ctx, req)
- if err != nil {
- return fmt.Errorf("failed to connect to OpenAI API: %w", err)
- }
- return nil
- }
|