-import { useState } from 'react';
-import { AIResponse } from '@/types/ai';
-import { useSettings } from '@/contexts/SettingsContext';
+import { useState } from "react";
 
 export const useAI = () => {
-  const [responses, setResponses] = useState<Record<string, AIResponse>>({});
-  const [chatgptResponse, setChatgptResponse] = useState<AIResponse>({
-    modelId: 'chatgpt',
-    content: '',
-    isLoading: false
-  });
-  const { apiKeys } = useSettings();
+  const [loading, setLoading] = useState(false);
 
   const callOpenAI = async (prompt: string, apiKey: string): Promise<string> => {
-    const response = await fetch('https://api.openai.com/v1/chat/completions', {
-      method: 'POST',
+    const response = await fetch("https://api.openai.com/v1/chat/completions", {
+      method: "POST",
       headers: {
-        'Authorization': `Bearer ${apiKey}`,
-        'Content-Type': 'application/json',
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`,
       },
       body: JSON.stringify({
-        model: 'gpt-4o-mini',
-        messages: [{ role: 'user', content: prompt }],
-        max_tokens: 500,
-        temperature: 0.7,
+        model: "gpt-4",
+        messages: [{ role: "user", content: prompt }],
       }),
     });
 
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => ({}));
-      const errorMessage = errorData?.error?.message || `OpenAI API returned ${response.status} ${response.statusText}`;
-      throw new Error(`OpenAI Error: ${errorMessage}`);
-    }
-
-    const data = await response.json();
-    return data.choices[0]?.message?.content || 'No response received';
-  };
-
-  const callClaude = async (prompt: string, apiKey: string): Promise<string> => {
-    const response = await fetch('https://api.anthropic.com/v1/messages', {
-      method: 'POST',
-      headers: {
-        'Authorization': `Bearer ${apiKey}`,
-        'Content-Type': 'application/json',
-        'anthropic-version': '2023-06-01',
-      },
-      body: JSON.stringify({
-        model: 'claude-3-haiku-20240307',
-        max_tokens: 500,
-        messages: [{ role: 'user', content: prompt }],
-      }),
-    });
+    const result = await response.json();
 
     if (!response.ok) {
-      const errorData = await response.json().catch(() => ({}));
-      const errorMessage = errorData?.error?.message || `Claude API returned ${response.status} ${response.statusText}`;
-      throw new Error(`Claude Error: ${errorMessage}`);
+      throw new Error(result.error?.message || "OpenAI Error");
     }
 
-    const data = await response.json();
-    return data.content[0]?.text || 'No response received';
+    return result.choices?.[0]?.message?.content || "No response from OpenAI";
   };
 
-const callGemini = async (prompt: string, apiKey: string): Promise<string> => {
-  const response = await fetch(
-    `https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent?key=${apiKey}`,
-    {
-      method: 'POST',
-      headers: { 'Content-Type': 'application/json' },
-      body: JSON.stringify({
-        contents: [
-          {
-            parts: [{ text: prompt }],
+  const callGemini = async (prompt: string, apiKey: string): Promise<string> => {
+    const response = await fetch(
+      `https://generativelanguage.googleapis.com/v1/models/gemini-pro:generateContent?key=${apiKey}`,
+      {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify({
+          contents: [
+            {
+              parts: [{ text: prompt }],
+            },
+          ],
+          generationConfig: {
+            maxOutputTokens: 500,
+            temperature: 0.7,
           },
-        ],
-        generationConfig: {
-          maxOutputTokens: 500,
-          temperature: 0.7,
-        },
-      }),
-    }
-  );
+        }),
+      }
+    );
 
-  const data = await response.json();
+    const result = await response.json();
 
     if (!response.ok) {
-    const errorData = await response.json().catch(() => ({}));
-    const errorMessage = errorData?.error?.message || `Gemini API returned ${response.status} ${response.statusText}`;
-    throw new Error(`Gemini Error: ${errorMessage}`);
+      throw new Error(result.error?.message || "Gemini Error");
     }
 
-  const data = await response.json();
-  return data.candidates[0]?.content?.parts[0]?.text || 'No response received';
+    return result.candidates?.[0]?.content?.parts?.[0]?.text || "No response from Gemini";
   };
 
-  const callDeepSeek = async (prompt: string, apiKey: string): Promise<string> => {
-    const response = await fetch('https://api.deepseek.com/v1/chat/completions', {
-      method: 'POST',
+  const callClaude = async (prompt: string, apiKey: string): Promise<string> => {
+    const response = await fetch("https://api.anthropic.com/v1/messages", {
+      method: "POST",
       headers: {
-        'Authorization': `Bearer ${apiKey}`,
-        'Content-Type': 'application/json',
+        "Content-Type": "application/json",
+        "x-api-key": apiKey,
+        "anthropic-version": "2023-06-01",
       },
       body: JSON.stringify({
-        model: 'deepseek-chat',
-        messages: [{ role: 'user', content: prompt }],
-        max_tokens: 500,
-        temperature: 0.7,
+        model: "claude-3-opus-20240229",
+        max_tokens: 1024,
+        messages: [{ role: "user", content: prompt }],
       }),
     });
 
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => ({}));
-      const errorMessage = errorData?.error?.message || `DeepSeek API returned ${response.status} ${response.statusText}`;
-      throw new Error(`DeepSeek Error: ${errorMessage}`);
-    }
-
-    const data = await response.json();
-    return data.choices[0]?.message?.content || 'No response received';
-  };
-
-  const callLlama = async (prompt: string, baseUrl: string): Promise<string> => {
-    const cleanUrl = baseUrl.replace(/\/$/, '');
-    const response = await fetch(`${cleanUrl}/api/generate`, {
-      method: 'POST',
-      headers: { 'Content-Type': 'application/json' },
-      body: JSON.stringify({ model: 'llama2', prompt, stream: false }),
-    });
+    const result = await response.json();
 
     if (!response.ok) {
-      if (response.status === 0 || !response.status) {
-        throw new Error('Ollama Error: Cannot connect to Ollama. Make sure Ollama is running and accessible.');
-      }
-      const errorData = await response.json().catch(() => ({}));
-      const errorMessage = errorData?.error || `Ollama returned ${response.status} ${response.statusText}`;
-      throw new Error(`Ollama Error: ${errorMessage}`);
+      throw new Error(result.error?.message || "Claude Error");
     }
 
-    const data = await response.json();
-    return data.response || 'No response received';
-  };
-
-  const updateResponse = (modelId: string, update: Partial<AIResponse>) => {
-    setResponses(prev => ({
-      ...prev,
-      [modelId]: {
-        ...prev[modelId],
-        ...update,
-        modelId
-      }
-    }));
+    return result.content?.[0]?.text || "No response from Claude";
   };
 
-  const callModel = async (modelId: string, prompt: string) => {
-    updateResponse(modelId, { isLoading: true, error: undefined });
+  const callLLaMA = async (prompt: string, apiUrl: string): Promise<string> => {
+    const response = await fetch(apiUrl, {
+      method: "POST",
+      headers: { "Content-Type": "application/json" },
+      body: JSON.stringify({
+        prompt: prompt,
+      }),
+    });
 
-    try {
-      let content = '';
+    const result = await response.json();
 
-      switch (modelId) {
-        case 'openai':
-          if (!apiKeys.openai) throw new Error('Missing API key for ChatGPT. Please add your OpenAI API key in Settings.');
-          content = await callOpenAI(prompt, apiKeys.openai);
-          break;
-        case 'claude':
-          if (!apiKeys.anthropic) throw new Error('Missing API key for Claude. Please add your Anthropic API key in Settings.');
-          if (!apiKeys.anthropic.startsWith('sk-ant-')) throw new Error('Invalid Anthropic API key format. Key should start with "sk-ant-"');
-          content = await callClaude(prompt, apiKeys.anthropic);
-          break;
-        case 'gemini':
-          if (!apiKeys.google) throw new Error('Missing API key for Gemini. Please add your Google AI API key in Settings.');
-          content = await callGemini(prompt, apiKeys.google);
-          break;
-        case 'deepseek':
-          if (!apiKeys.deepseek) throw new Error('Missing API key for DeepSeek. Please add your DeepSeek API key in Settings.');
-          content = await callDeepSeek(prompt, apiKeys.deepseek);
-          break;
-        case 'llama':
-          if (!apiKeys.ollama) throw new Error('Missing Ollama URL. Please add your Ollama server URL in Settings.');
-          content = await callLlama(prompt, apiKeys.ollama);
-          break;
-        default:
-          throw new Error(`Unknown model: ${modelId}`);
-      }
-
-      updateResponse(modelId, { content, isLoading: false });
-    } catch (error) {
-      const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';
-      updateResponse(modelId, { error: errorMessage, isLoading: false, content: '' });
-    }
-  };
-
-  const generateSummary = async (prompt: string, modelResponses: Record<string, AIResponse>) => {
-    if (!apiKeys.openai) {
-      setChatgptResponse({
-        modelId: 'chatgpt',
-        content: '',
-        isLoading: false,
-        error: 'OpenAI API key is missing'
-      });
-      return;
+    if (!response.ok) {
+      throw new Error(result.error?.message || "LLaMA Error");
     }
 
-    setChatgptResponse(prev => ({ ...prev, isLoading: true, error: undefined }));
-
-    try {
-      const validResponses = Object.entries(modelResponses)
-        .filter(([_, response]) => response.content && !response.error)
-        .map(([modelId, response]) => `${modelId.toUpperCase()}: ${response.content}`)
-        .join('\n\n');
-
-      if (!validResponses.trim()) {
-        setChatgptResponse({
-          modelId: 'chatgpt',
-          content: '',
-          isLoading: false,
-          error: 'هیچ پاسخی از مدلهای دیگر دریافت نشد، بنابراین خلاصهای نمیتوان ساخت.'
-        });
-        return;
-      }
-
-      const summaryPrompt = `
-Original question: "${prompt}"
-
-Here are responses from different AI models:
-${validResponses}
-
-Please provide a concise summary that synthesizes the key insights from these responses. Focus on the most important points and any notable differences or agreements between the models.
-      `;
-
-      const content = await callOpenAI(summaryPrompt, apiKeys.openai);
-      setChatgptResponse({
-        modelId: 'chatgpt',
-        content,
-        isLoading: false,
-        error: undefined
-      });
-    } catch (error) {
-      const errorMessage = error instanceof Error ? error.message : 'Failed to generate summary';
-      setChatgptResponse({
-        modelId: 'chatgpt',
-        content: '',
-        isLoading: false,
-        error: errorMessage
-      });
-    }
+    return result.response || "No response from LLaMA";
   };
 
-  const submitPrompt = async (prompt: string) => {
-    setResponses({});
-    setChatgptResponse({ modelId: 'chatgpt', content: '', isLoading: false });
-
-    const modelIds = ['openai', 'claude', 'gemini', 'deepseek', 'llama'];
-    const promises = modelIds.map(async (modelId) => {
-      try {
-        await callModel(modelId, prompt);
-      } catch (error) {
-        console.error(`Error calling ${modelId}:`, error);
-      }
+  const callGrok = async (prompt: string, apiKey: string): Promise<string> => {
+    const response = await fetch("https://grok.securemanager.dev/api/grok", {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`,
+      },
+      body: JSON.stringify({ prompt }),
     });
 
-    await Promise.allSettled(promises);
-
-    setTimeout(() => {
-      setResponses(currentResponses => {
-        generateSummary(prompt, currentResponses);
-        return currentResponses;
-      });
-    }, 500);
-  };
+    const result = await response.json();
 
-  const refreshModel = (modelId: string, prompt: string) => {
-    if (modelId === 'chatgpt') {
-      generateSummary(prompt, responses);
-    } else {
-      callModel(modelId, prompt);
+    if (!response.ok) {
+      throw new Error(result.error?.message || "Grok Error");
     }
+
+    return result.response || "No response from Grok";
   };
 
   return {
-    responses,
-    chatgptResponse,
-    submitPrompt,
-    refreshModel,
-    isLoading: Object.values(responses).some(r => r.isLoading) || chatgptResponse.isLoading
+    loading,
+    setLoading,
+    callOpenAI,
+    callGemini,
+    callClaude,
+    callLLaMA,
+    callGrok,
   };
 };
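For reference, a minimal sketch of how the refactored hook might be consumed from a component. This is an illustration, not part of the commit: the import path "@/hooks/useAI", the PromptBox component, and the idea that the caller now supplies its own API key and drives the loading flag (the hook no longer reads keys from SettingsContext) are all assumptions.

import { useState } from "react";
// Hypothetical import path; adjust to wherever the hook file actually lives.
import { useAI } from "@/hooks/useAI";

// Minimal consumer: the refactored hook exposes loading/setLoading and the
// per-provider call helpers, so the caller owns both the key and the flag.
export const PromptBox = ({ apiKey }: { apiKey: string }) => {
  const { loading, setLoading, callOpenAI } = useAI();
  const [answer, setAnswer] = useState("");

  const ask = async (prompt: string) => {
    setLoading(true);
    try {
      setAnswer(await callOpenAI(prompt, apiKey));
    } catch (err) {
      // Surface provider errors (thrown by the hook) in the UI.
      setAnswer(err instanceof Error ? err.message : "Unknown error");
    } finally {
      setLoading(false);
    }
  };

  return (
    <div>
      <button disabled={loading} onClick={() => ask("Summarize this repository")}>
        Ask
      </button>
      <p>{answer}</p>
    </div>
  );
};

One design note on the new helpers: each one calls response.json() before checking response.ok, so a provider that returns a non-JSON error body will make the json() call itself throw. Wrapping every call in try/catch, as above, keeps that failure mode visible to the caller.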