
Commit 8bb966a

Merge pull request #100 from meysamhadeli/feat-add-deep-seek-api
feat: add deepseek api

2 parents 9e012b1 + 5087bac

10 files changed: +241 −8 lines

README.md (+6 −6)
@@ -7,7 +7,7 @@
 
 > 💡 **Codai is an AI code assistant designed to help developers efficiently manage their daily tasks through a session-based CLI, such as adding new features, refactoring,
 and performing detailed code reviews. What makes codai stand out is its deep understanding of the entire context of your project, enabling it to analyze your code base
-and suggest improvements or new code based on your context. This AI-powered tool supports multiple LLM providers, such as OpenAI, Azure OpenAI, Ollama, Anthropic, and OpenRouter.**
+and suggest improvements or new code based on your context. This AI-powered tool supports multiple LLM providers, such as OpenAI, DeepSeek, Azure OpenAI, Ollama, Anthropic, and OpenRouter.**
 
 ![](./assets/codai-demo.gif)
 
@@ -61,9 +61,9 @@ The `codai-config` file should be like following example base on your `AI provid
 **codai-config.yml**
 ```yml
 ai_provider_config:
-  chat_provider_name: "openai" # openai | ollama | azure-openai | anthropic | openrouter
-  chat_base_url: "https://api.openai.com" # "http://localhost:11434" | "https://test,openai.azure.com" | "https://api.anthropic.com" | "https://openrouter.ai"
-  chat_model: "gpt-4o"
+  chat_provider_name: "openai" # openai | deepseek | ollama | azure-openai | anthropic | openrouter
+  chat_base_url: "https://api.openai.com" # "https://api.deepseek.com" | "http://localhost:11434" | "https://test,openai.azure.com" | "https://api.anthropic.com" | "https://openrouter.ai"
+  chat_model: "gpt-4o" # "deepseek-chat" | "claude-3-5-sonnet" | "llama3.3"
   chat_api_version: "2024-04-01-preview" #(Optional, If your AI provider like 'AzureOpenai' or 'Anthropic' has chat api version.)
   embeddings_provider_name: "openai" # openai | ollama | azure-openai
   embeddings_base_url: "https://api.openai.com" | "http://localhost:11434" | "https://test.openai.azure.com"
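Putting the DeepSeek-specific values from this hunk together, a working `codai-config.yml` for DeepSeek would look roughly like the sketch below. Only keys that appear in the README diff are used; the embeddings entries keep the OpenAI defaults shown above, and `chat_api_version` is omitted since the README marks it optional:

```yml
ai_provider_config:
  chat_provider_name: "deepseek"
  chat_base_url: "https://api.deepseek.com"
  chat_model: "deepseek-chat"
  embeddings_provider_name: "openai"
  embeddings_base_url: "https://api.openai.com"
```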
@@ -98,7 +98,7 @@ Also, you can use `.codai-gitignore` in the `root of your working directory,` an
 
 ## 🔮 LLM Models
 ### ⚡ Best Models
-The codai works well with advanced LLM models specifically designed for code generation, including `GPT-4o`, `GPT-4`, `Claude 3.5 Sonnet` and `Claude 3 Opus`. These models leverage the latest in AI technology, providing powerful capabilities for understanding and generating code, making them ideal for enhancing your development workflow.
+The codai works well with advanced LLM models specifically designed for code generation, including `GPT-4o`, `GPT-4`, `deepseek-chat` and `claude-3-5-sonnet`. These models leverage the latest in AI technology, providing powerful capabilities for understanding and generating code, making them ideal for enhancing your development workflow.
 
 ### 💻 Local Models
 In addition to cloud-based models, codai is compatible with local models such as `Ollama`. To achieve the best results, it is recommended to utilize models like [Phi-3-medium instruct (128k)](https://github.com/marketplace/models/azureml/Phi-3-medium-128k-instruct), [Mistral Large (2407)](https://github.com/marketplace/models/azureml-mistral/Mistral-large-2407) and [Meta-Llama-3.1-70B-Instruct](https://github.com/marketplace/models/azureml-meta/Meta-Llama-3-1-70B-Instruct). These models have been optimized for coding tasks, ensuring that you can maximize the efficiency and effectiveness of your coding projects.
@@ -143,7 +143,7 @@ This command will initiate the codai assistant to help you with your coding task
 
 🌳 Summarize Full Project Context using Tree-sitter.
 
-⚡ Support variety of LLM models like GPT-4o, GPT-4, and Ollama.
+⚡ Support variety of LLM models like GPT-4o, GPT-4, deepseek-chat, claude-3-5-sonnet, and Ollama.
 
 🗂️ Enable the AI to modify several files at the same time.

providers/ai_provider.go (+13 −1)
@@ -3,8 +3,9 @@ package providers
 import (
     "errors"
     "github.com/meysamhadeli/codai/providers/anthropic"
-    azure_openai "github.com/meysamhadeli/codai/providers/azure-openai"
+    "github.com/meysamhadeli/codai/providers/azure_openai"
     "github.com/meysamhadeli/codai/providers/contracts"
+    "github.com/meysamhadeli/codai/providers/deepseek"
     "github.com/meysamhadeli/codai/providers/ollama"
     "github.com/meysamhadeli/codai/providers/openai"
     "github.com/meysamhadeli/codai/providers/openrouter"
@@ -43,6 +44,17 @@ func ChatProviderFactory(config *AIProviderConfig, tokenManagement contracts2.IT
         Threshold:       config.Threshold,
         TokenManagement: tokenManagement,
     }), nil
+    case "deepseek":
+        return deepseek.NewDeepSeekChatProvider(&deepseek.DeepSeekConfig{
+            Temperature:     config.Temperature,
+            EncodingFormat:  config.EncodingFormat,
+            ChatModel:       config.ChatModel,
+            ChatBaseURL:     config.ChatBaseURL,
+            ChatApiKey:      config.ChatApiKey,
+            MaxTokens:       config.MaxTokens,
+            Threshold:       config.Threshold,
+            TokenManagement: tokenManagement,
+        }), nil
     case "openai":
         return openai.NewOpenAIChatProvider(&openai.OpenAIConfig{
             Temperature: config.Temperature,
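The new `case "deepseek"` follows the same pattern as the surrounding providers: the factory switches on the configured provider name and returns a concrete implementation behind a common chat interface. A minimal, self-contained Go sketch of that dispatch pattern (the `Provider` interface and constructors below are hypothetical stand-ins, not codai's actual contracts):

```go
package main

import (
	"errors"
	"fmt"
)

// Provider is a stand-in for codai's contracts.IChatAIProvider interface.
type Provider interface {
	Name() string
}

type deepSeekProvider struct{ baseURL string }

func (d *deepSeekProvider) Name() string { return "deepseek" }

type openAIProvider struct{ baseURL string }

func (o *openAIProvider) Name() string { return "openai" }

// chatProviderFactory maps a configured provider name onto a concrete
// implementation, mirroring the switch in providers/ai_provider.go.
func chatProviderFactory(name, baseURL string) (Provider, error) {
	switch name {
	case "deepseek":
		return &deepSeekProvider{baseURL: baseURL}, nil
	case "openai":
		return &openAIProvider{baseURL: baseURL}, nil
	default:
		return nil, errors.New("unsupported provider: " + name)
	}
}

func main() {
	p, err := chatProviderFactory("deepseek", "https://api.deepseek.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Name()) // deepseek
}
```

With this shape, adding a provider is a one-case change plus a new package, which is essentially what this commit does.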

providers/azure-openai/azure_openai_provider.go renamed to providers/azure_openai/azure_openai_provider.go (+1 −1)
@@ -7,7 +7,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
-    azure_openai_models "github.com/meysamhadeli/codai/providers/azure-openai/models"
+    azure_openai_models "github.com/meysamhadeli/codai/providers/azure_openai/models"
     "github.com/meysamhadeli/codai/providers/contracts"
     "github.com/meysamhadeli/codai/providers/models"
     contracts2 "github.com/meysamhadeli/codai/token_management/contracts"
New file in providers/deepseek/ (package deepseek) (+181)
@@ -0,0 +1,181 @@
+package deepseek
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "github.com/meysamhadeli/codai/providers/contracts"
+    deepseek_models "github.com/meysamhadeli/codai/providers/deepseek/models"
+    "github.com/meysamhadeli/codai/providers/models"
+    contracts2 "github.com/meysamhadeli/codai/token_management/contracts"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "strings"
+)
+
+// DeepSeekConfig implements the Provider interface for DeepSeek.
+type DeepSeekConfig struct {
+    ChatBaseURL     string
+    ChatModel       string
+    Temperature     float32
+    EncodingFormat  string
+    ChatApiKey      string
+    MaxTokens       int
+    Threshold       float64
+    TokenManagement contracts2.ITokenManagement
+    ChatApiVersion  string
+}
+
+// NewDeepSeekChatProvider initializes a new DeepSeekAPIProvider.
+func NewDeepSeekChatProvider(config *DeepSeekConfig) contracts.IChatAIProvider {
+    return &DeepSeekConfig{
+        ChatBaseURL:     config.ChatBaseURL,
+        ChatModel:       config.ChatModel,
+        Temperature:     config.Temperature,
+        EncodingFormat:  config.EncodingFormat,
+        MaxTokens:       config.MaxTokens,
+        Threshold:       config.Threshold,
+        ChatApiKey:      config.ChatApiKey,
+        ChatApiVersion:  config.ChatApiVersion,
+        TokenManagement: config.TokenManagement,
+    }
+}
+func (deepSeekProvider *DeepSeekConfig) ChatCompletionRequest(ctx context.Context, userInput string, prompt string) <-chan models.StreamResponse {
+    responseChan := make(chan models.StreamResponse)
+    var markdownBuffer strings.Builder // Buffer to accumulate content until newline
+    var usage deepseek_models.Usage    // Variable to hold usage data
+
+    go func() {
+        defer close(responseChan)
+
+        // Prepare the request body
+        reqBody := deepseek_models.DeepSeekChatCompletionRequest{
+            Model: deepSeekProvider.ChatModel,
+            Messages: []deepseek_models.Message{
+                {Role: "system", Content: prompt},
+                {Role: "user", Content: userInput},
+            },
+            Stream:      true,
+            Temperature: &deepSeekProvider.Temperature,
+        }
+
+        jsonData, err := json.Marshal(reqBody)
+        if err != nil {
+            markdownBuffer.Reset()
+            responseChan <- models.StreamResponse{Err: fmt.Errorf("error marshalling request body: %v", err)}
+            return
+        }
+
+        // Create a new HTTP request
+        req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("%s/chat/completions", deepSeekProvider.ChatBaseURL), bytes.NewBuffer(jsonData))
+        if err != nil {
+            markdownBuffer.Reset()
+            responseChan <- models.StreamResponse{Err: fmt.Errorf("error creating request: %v", err)}
+            return
+        }
+
+        req.Header.Set("Content-Type", "application/json")
+        req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", deepSeekProvider.ChatApiKey))
+
+        client := &http.Client{}
+        resp, err := client.Do(req)
+        if err != nil {
+            markdownBuffer.Reset()
+            if errors.Is(ctx.Err(), context.Canceled) {
+                responseChan <- models.StreamResponse{Err: fmt.Errorf("request canceled: %v", err)}
+                return
+            }
+            responseChan <- models.StreamResponse{Err: fmt.Errorf("error sending request: %v", err)}
+            return
+        }
+        defer resp.Body.Close()
+
+        if resp.StatusCode != http.StatusOK {
+            markdownBuffer.Reset()
+            body, _ := ioutil.ReadAll(resp.Body)
+            var apiError models.AIError
+            if err := json.Unmarshal(body, &apiError); err != nil {
+                responseChan <- models.StreamResponse{Err: fmt.Errorf("error parsing error response: %v", err)}
+                return
+            }
+
+            responseChan <- models.StreamResponse{Err: fmt.Errorf("API request failed with status code '%d' - %s\n", resp.StatusCode, apiError.Error.Message)}
+            return
+        }
+
+        reader := bufio.NewReader(resp.Body)
+
+        // Stream processing
+        for {
+            line, err := reader.ReadString('\n')
+            if err != nil {
+                markdownBuffer.Reset()
+                if err == io.EOF {
+                    // Stream ended, send any remaining content
+                    if markdownBuffer.Len() > 0 {
+                        responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
+                    }
+
+                    // Notify that the stream is done
+                    responseChan <- models.StreamResponse{Done: true}
+
+                    // Count total tokens usage
+                    if usage.TotalTokens > 0 {
+                        deepSeekProvider.TokenManagement.UsedTokens(usage.PromptTokens, usage.CompletionTokens)
+                    }
+
+                    break
+                }
+                responseChan <- models.StreamResponse{Err: fmt.Errorf("error reading stream: %v", err)}
+                return
+            }
+
+            if strings.HasPrefix(line, "data: ") {
+                jsonPart := strings.TrimPrefix(line, "data: ")
+                var response deepseek_models.DeepSeekChatCompletionResponse
+                if err := json.Unmarshal([]byte(jsonPart), &response); err != nil {
+                    markdownBuffer.Reset()
+                    responseChan <- models.StreamResponse{Err: fmt.Errorf("error unmarshalling chunk: %v", err)}
+                    return
+                }
+
+                // Check if the response has usage information
+                if response.Usage.TotalTokens > 0 {
+                    usage = response.Usage // Capture the usage data for later use
+                }
+
+                // Check for finish_reason
+                if len(response.Choices) > 0 && response.Choices[0].FinishReason != "" {
+                    // Stream completed for this choice
+                    responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
+                    responseChan <- models.StreamResponse{Done: true}
+
+                    // Count total tokens usage
+                    if usage.TotalTokens > 0 {
+                        deepSeekProvider.TokenManagement.UsedTokens(usage.PromptTokens, usage.CompletionTokens)
+                    }
+
+                    break
+                }
+
+                // Accumulate and send response content
+                if len(response.Choices) > 0 {
+                    content := response.Choices[0].Delta.Content
+                    markdownBuffer.WriteString(content)
+
+                    // Send chunk if it contains a newline, and then reset the buffer
+                    if strings.Contains(content, "\n") {
+                        responseChan <- models.StreamResponse{Content: markdownBuffer.String()}
+                        markdownBuffer.Reset()
+                    }
+                }
+            }
+        }
+    }()
+
+    return responseChan
+}
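ChatCompletionRequest hands results back on a receive-only channel of models.StreamResponse values carrying Content, Done, or Err, so a caller drains the provider with a plain range loop. A hedged sketch of that consumption pattern (the streamResponse type below is a local stand-in for codai's models.StreamResponse, and the fake producer goroutine replaces a real API call):

```go
package main

import "fmt"

// streamResponse mirrors the Content/Done/Err shape of models.StreamResponse;
// declared locally so the sketch is self-contained.
type streamResponse struct {
	Content string
	Done    bool
	Err     error
}

// drain consumes a provider stream the way a codai caller would:
// print content as it arrives, stop on Done, surface the first error.
func drain(ch <-chan streamResponse) error {
	for resp := range ch {
		if resp.Err != nil {
			return resp.Err
		}
		if resp.Done {
			return nil
		}
		fmt.Print(resp.Content)
	}
	return nil
}

func main() {
	ch := make(chan streamResponse)
	go func() {
		defer close(ch)
		ch <- streamResponse{Content: "hello "}
		ch <- streamResponse{Content: "world\n"}
		ch <- streamResponse{Done: true}
	}()
	if err := drain(ch); err != nil {
		fmt.Println("stream error:", err)
	}
}
```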
New file in providers/deepseek/models/ (+15)

@@ -0,0 +1,15 @@
+package models
+
+// DeepSeekChatCompletionRequest defines the request body structure.
+type DeepSeekChatCompletionRequest struct {
+    Model       string    `json:"model"`
+    Messages    []Message `json:"messages"`
+    Temperature *float32  `json:"temperature,omitempty"` // Optional field (pointer to float32)
+    Stream      bool      `json:"stream"`
+}
+
+// Message defines a single chat message in the request body.
+type Message struct {
+    Role    string `json:"role"`
+    Content string `json:"content"`
+}
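Because these are plain encoding/json structs, it is easy to check the exact wire format the provider sends. A small standalone sketch (structs re-declared locally so it runs on its own; the message contents and temperature value are made up) marshaling a streaming request like the one ChatCompletionRequest builds:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the request structs from providers/deepseek/models.
type message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type chatCompletionRequest struct {
	Model       string    `json:"model"`
	Messages    []message `json:"messages"`
	Temperature *float32  `json:"temperature,omitempty"`
	Stream      bool      `json:"stream"`
}

func main() {
	temp := float32(0.2) // hypothetical temperature value
	req := chatCompletionRequest{
		Model: "deepseek-chat",
		Messages: []message{
			{Role: "system", Content: "You are a code assistant."},
			{Role: "user", Content: "Refactor this function."},
		},
		Temperature: &temp,
		Stream:      true,
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
	// Leaving Temperature as a nil pointer drops the field entirely,
	// which is why the struct uses *float32 with omitempty.
}
```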
New file in providers/deepseek/models/ (+25)

@@ -0,0 +1,25 @@
+package models
+
+// DeepSeekChatCompletionResponse represents the entire response structure from DeepSeek's (OpenAI-compatible) chat completion API.
+type DeepSeekChatCompletionResponse struct {
+    Choices []Choice `json:"choices"` // Array of choice completions
+    Usage   Usage    `json:"usage"`   // Token usage details
+}
+
+// Choice represents an individual choice in the response.
+type Choice struct {
+    Delta        Delta  `json:"delta"`
+    FinishReason string `json:"finish_reason"` // Check for completion
+}
+
+// Delta represents the delta object in each choice containing the content.
+type Delta struct {
+    Content string `json:"content"`
+}
+
+// Usage defines the token usage information for the response.
+type Usage struct {
+    PromptTokens     int `json:"prompt_tokens"`     // Number of tokens in the prompt
+    CompletionTokens int `json:"completion_tokens"` // Number of tokens in the completion
+    TotalTokens      int `json:"total_tokens"`      // Total tokens used
+}
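On the return path, each `data:` line of the stream carries a JSON chunk that unmarshals into these structs. A hedged sketch with a hand-written sample chunk (not captured from a live DeepSeek response; only the fields the provider actually reads are modeled):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the response structs from providers/deepseek/models.
type delta struct {
	Content string `json:"content"`
}

type choice struct {
	Delta        delta  `json:"delta"`
	FinishReason string `json:"finish_reason"`
}

type usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

type chatCompletionResponse struct {
	Choices []choice `json:"choices"`
	Usage   usage    `json:"usage"`
}

func main() {
	// A hand-written sample chunk in the shape the provider parses.
	chunk := `{"choices":[{"delta":{"content":"hi"},"finish_reason":""}],` +
		`"usage":{"prompt_tokens":0,"completion_tokens":0,"total_tokens":0}}`
	var resp chatCompletionResponse
	if err := json.Unmarshal([]byte(chunk), &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Choices[0].Delta.Content) // hi
}
```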
