package deepseek

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/meysamhadeli/codai/providers/contracts"
	deepseek_models "github.com/meysamhadeli/codai/providers/deepseek/models"
	"github.com/meysamhadeli/codai/providers/models"
	contracts2 "github.com/meysamhadeli/codai/token_management/contracts"
)

// DeepSeekConfig implements the Provider interface for DeepSeek.
type DeepSeekConfig struct {
	ChatBaseURL     string
	ChatModel       string
	Temperature     float32
	EncodingFormat  string
	ChatApiKey      string
	MaxTokens       int
	Threshold       float64
	TokenManagement contracts2.ITokenManagement
	ChatApiVersion  string
}

// NewDeepSeekChatProvider initializes a new DeepSeek chat provider from the given config.
func NewDeepSeekChatProvider(config *DeepSeekConfig) contracts.IChatAIProvider {
	return &DeepSeekConfig{
		ChatBaseURL:     config.ChatBaseURL,
		ChatModel:       config.ChatModel,
		Temperature:     config.Temperature,
		EncodingFormat:  config.EncodingFormat,
		MaxTokens:       config.MaxTokens,
		Threshold:       config.Threshold,
		ChatApiKey:      config.ChatApiKey,
		ChatApiVersion:  config.ChatApiVersion,
		TokenManagement: config.TokenManagement,
	}
}
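
// ChatCompletionRequest sends a streaming chat-completion request to the
// DeepSeek API and returns a channel of StreamResponse values. The request
// runs on a background goroutine; the channel is closed once the stream
// completes or after an error value is sent. A minimal consumption sketch
// (the config values below are illustrative assumptions, not part of this file):
//
//	provider := NewDeepSeekChatProvider(&DeepSeekConfig{
//		ChatBaseURL: "https://api.deepseek.com",
//		ChatModel:   "deepseek-chat",
//		ChatApiKey:  apiKey, // hypothetical variable
//	})
//	for chunk := range provider.ChatCompletionRequest(ctx, userInput, systemPrompt) {
//		if chunk.Err != nil {
//			// handle the error
//		}
//		fmt.Print(chunk.Content)
//	}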
func (deepSeekProvider *DeepSeekConfig) ChatCompletionRequest(ctx context.Context, userInput string, prompt string) <-chan models.StreamResponse {
	responseChan := make(chan models.StreamResponse)
	var markdownBuffer strings.Builder // Buffer to accumulate content until a newline
	var usage deepseek_models.Usage    // Holds usage data reported by the stream

	go func() {
		defer close(responseChan)

		// Prepare the request body
		reqBody := deepseek_models.DeepSeekChatCompletionRequest{
			Model: deepSeekProvider.ChatModel,
			Messages: []deepseek_models.Message{
				{Role: "system", Content: prompt},
				{Role: "user", Content: userInput},
			},
			Stream:      true,
			Temperature: &deepSeekProvider.Temperature,
		}
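
		// The marshalled payload follows the OpenAI-compatible chat schema;
		// an illustrative (not verbatim) serialization:
		//
		//	{"model":"deepseek-chat",
		//	 "messages":[{"role":"system","content":"..."},{"role":"user","content":"..."}],
		//	 "stream":true,"temperature":0.2}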
		jsonData, err := json.Marshal(reqBody)
		if err != nil {
			markdownBuffer.Reset()
			responseChan <- models.StreamResponse{Err: fmt.Errorf("error marshalling request body: %v", err)}
			return
		}

		// Create a new HTTP request
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/chat/completions", deepSeekProvider.ChatBaseURL), bytes.NewBuffer(jsonData))
		if err != nil {
			markdownBuffer.Reset()
			responseChan <- models.StreamResponse{Err: fmt.Errorf("error creating request: %v", err)}
			return
		}

		req.Header.Set("Content-Type", "application/json")
		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", deepSeekProvider.ChatApiKey))

		client := &http.Client{}
		resp, err := client.Do(req)
		if err != nil {
			markdownBuffer.Reset()
			if errors.Is(ctx.Err(), context.Canceled) {
				responseChan <- models.StreamResponse{Err: fmt.Errorf("request canceled: %v", err)}
				return
			}
			responseChan <- models.StreamResponse{Err: fmt.Errorf("error sending request: %v", err)}
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			markdownBuffer.Reset()
			body, _ := io.ReadAll(resp.Body)
			var apiError models.AIError
			if err := json.Unmarshal(body, &apiError); err != nil {
				// The error body was not the expected JSON shape; surface it raw.
				responseChan <- models.StreamResponse{Err: fmt.Errorf("API request failed with status code '%d' - %s", resp.StatusCode, string(body))}
				return
			}

			responseChan <- models.StreamResponse{Err: fmt.Errorf("API request failed with status code '%d' - %s", resp.StatusCode, apiError.Error.Message)}
			return
		}
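
		// The response body is a server-sent-event stream: each event arrives as a
		// single `data: {json}` line, and OpenAI-compatible APIs such as DeepSeek's
		// terminate the stream with a final `data: [DONE]` sentinel line.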
		reader := bufio.NewReader(resp.Body)

		// Stream processing
		for {
			line, err := reader.ReadString('\n')
			if err != nil {
				if err == io.EOF {
					// Stream ended: flush any buffered content before signaling
					// completion. (Resetting the buffer first would drop it.)
					if markdownBuffer.Len() > 0 {
						responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
					}

					// Notify that the stream is done
					responseChan <- models.StreamResponse{Done: true}

					// Count total tokens usage
					if usage.TotalTokens > 0 {
						deepSeekProvider.TokenManagement.UsedTokens(usage.PromptTokens, usage.CompletionTokens)
					}

					break
				}
				markdownBuffer.Reset()
				responseChan <- models.StreamResponse{Err: fmt.Errorf("error reading stream: %v", err)}
				return
			}

			if strings.HasPrefix(line, "data: ") {
				jsonPart := strings.TrimPrefix(line, "data: ")

				// Skip the "[DONE]" sentinel; it is not JSON. The subsequent EOF
				// (or an earlier finish_reason) handles stream completion.
				if strings.TrimSpace(jsonPart) == "[DONE]" {
					continue
				}

				var response deepseek_models.DeepSeekChatCompletionResponse
				if err := json.Unmarshal([]byte(jsonPart), &response); err != nil {
					markdownBuffer.Reset()
					responseChan <- models.StreamResponse{Err: fmt.Errorf("error unmarshalling chunk: %v", err)}
					return
				}

				// Capture usage data when a chunk reports it, for use after the stream ends
				if response.Usage.TotalTokens > 0 {
					usage = response.Usage
				}

				// Check for finish_reason
				if len(response.Choices) > 0 && response.Choices[0].FinishReason != "" {
					// Stream completed for this choice; flush any buffered content
					if markdownBuffer.Len() > 0 {
						responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
					}
					responseChan <- models.StreamResponse{Done: true}

					// Count total tokens usage
					if usage.TotalTokens > 0 {
						deepSeekProvider.TokenManagement.UsedTokens(usage.PromptTokens, usage.CompletionTokens)
					}

					break
				}

				// Accumulate and send response content
				if len(response.Choices) > 0 {
					content := response.Choices[0].Delta.Content
					markdownBuffer.WriteString(content)

					// Send the buffered chunk once it contains a newline, then reset the buffer
					if strings.Contains(content, "\n") {
						responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
						markdownBuffer.Reset()
					}
				}
			}
		}
	}()

	return responseChan
}