
Commit b15223e

Merge pull request #23 from mohamed-em2m/patch-4
fix changing models bug
2 parents 9c1187f + f8faf05 commit b15223e

2 files changed

Lines changed: 19 additions & 3 deletions


memoryos-pypi/memoryos.py

Lines changed: 2 additions & 1 deletion
@@ -44,6 +44,7 @@ def __init__(self, user_id: str,
         self.assistant_id = assistant_id
         self.data_storage_path = os.path.abspath(data_storage_path)
         self.llm_model = llm_model
+        os.environ["llm_model"]= llm_model
         self.mid_term_similarity_threshold = mid_term_similarity_threshold

         print(f"Initializing Memoryos for user '{self.user_id}' and assistant '{self.assistant_id}'. Data path: {self.data_storage_path}")
@@ -328,4 +329,4 @@ def force_mid_term_analysis(self):
         self.mid_term_heat_threshold = original_threshold # Restore original threshold

     def __repr__(self):
-        return f"<Memoryos user_id='{self.user_id}' assistant_id='{self.assistant_id}' data_path='{self.data_storage_path}'>"
+        return f"<Memoryos user_id='{self.user_id}' assistant_id='{self.assistant_id}' data_path='{self.data_storage_path}'>"

memoryos-pypi/utils.py

Lines changed: 17 additions & 2 deletions
@@ -73,7 +73,9 @@ def batch_chat_completion(self, requests):
         futures = []
         for req in requests:
             future = self.chat_completion_async(
-                model=req.get("model", "gpt-4o-mini"),
+                model=req.get("model",
+                model=os.environ.get("llm_model")
+                ),
                 messages=req["messages"],
                 temperature=req.get("temperature", 0.7),
                 max_tokens=req.get("max_tokens", 2000)
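
This hunk swaps the hard-coded per-request default for a lookup of the llm_model environment variable. Since dict.get() takes its default positionally (passing it as a keyword raises TypeError), a defensive way to express the intended fallback chain, request override first, then environment variable, then a hard-coded default, could be a small helper like the sketch below; resolve_model is a hypothetical name and not part of this commit:

    import os

    def resolve_model(req: dict, fallback: str = "gpt-4o-mini") -> str:
        # Precedence: explicit per-request "model" > llm_model env var > fallback.
        return req.get("model") or os.environ.get("llm_model") or fallback

Inside batch_chat_completion this would be used as model=resolve_model(req) when building each request.
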
@@ -183,6 +185,7 @@ def compute_time_decay(event_timestamp_str, current_timestamp_str, tau_hours=24)
 # ---- LLM-based Utility Functions ----

 def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
+    model=os.environ.get("llm_model") or model
     dialog_text = "\n".join([f"User: {d.get('user_input','')} Assistant: {d.get('agent_response','')}" for d in dialogs])
     messages = [
         {"role": "system", "content": prompts.SUMMARIZE_DIALOGS_SYSTEM_PROMPT},
@@ -192,6 +195,7 @@ def gpt_summarize_dialogs(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
     return client.chat_completion(model=model, messages=messages)

 def gpt_generate_multi_summary(text, client: OpenAIClient, model="gpt-4o-mini"):
+    model=os.environ.get("llm_model") or model
     messages = [
         {"role": "system", "content": prompts.MULTI_SUMMARY_SYSTEM_PROMPT},
         {"role": "user", "content": prompts.MULTI_SUMMARY_USER_PROMPT.format(text=text)}
@@ -211,6 +215,7 @@ def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini"
     """
     Analyze and update user personality profile from dialogs
     Combine the existing profile with the new dialogs and directly output the full updated profile
     """
+    model=os.environ.get("llm_model") or model
     conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
     messages = [
         {"role": "system", "content": prompts.PERSONALITY_ANALYSIS_SYSTEM_PROMPT},
@@ -226,6 +231,7 @@ def gpt_user_profile_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini"

 def gpt_knowledge_extraction(dialogs, client: OpenAIClient, model="gpt-4o-mini"):
     """Extract user private data and assistant knowledge from dialogs"""
+    model=os.environ.get("llm_model") or model
     conversation = "\n".join([f"User: {d.get('user_input','')} (Timestamp: {d.get('timestamp', '')})\nAssistant: {d.get('agent_response','')} (Timestamp: {d.get('timestamp', '')})" for d in dialogs])
     messages = [
         {"role": "system", "content": prompts.KNOWLEDGE_EXTRACTION_SYSTEM_PROMPT},
@@ -270,6 +276,7 @@ def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini",
     This function is kept for backward compatibility only.
     """
     # Call the new functions
+    model=os.environ.get("llm_model") or model
     profile = gpt_user_profile_analysis(dialogs, client, model, known_user_traits)
     knowledge_data = gpt_knowledge_extraction(dialogs, client, model)

@@ -281,6 +288,7 @@ def gpt_personality_analysis(dialogs, client: OpenAIClient, model="gpt-4o-mini",


 def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="gpt-4o-mini"):
+    model=os.environ.get("llm_model") or model
     messages = [
         {"role": "system", "content": prompts.UPDATE_PROFILE_SYSTEM_PROMPT},
         {"role": "user", "content": prompts.UPDATE_PROFILE_USER_PROMPT.format(old_profile=old_profile, new_analysis=new_analysis)}
@@ -289,6 +297,8 @@ def gpt_update_profile(old_profile, new_analysis, client: OpenAIClient, model="g
     return client.chat_completion(model=model, messages=messages)

 def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"):
+    model=os.environ.get("llm_model") or model
+
     messages = [
         {"role": "system", "content": prompts.EXTRACT_THEME_SYSTEM_PROMPT},
         {"role": "user", "content": prompts.EXTRACT_THEME_USER_PROMPT.format(answer_text=answer_text)}
@@ -297,6 +307,9 @@ def gpt_extract_theme(answer_text, client: OpenAIClient, model="gpt-4o-mini"):
     return client.chat_completion(model=model, messages=messages)

 def llm_extract_keywords(text, client: OpenAIClient, model="gpt-4o-mini"):
+
+    model=os.environ.get("llm_model") or model
+
     messages = [
         {"role": "system", "content": prompts.EXTRACT_KEYWORDS_SYSTEM_PROMPT},
         {"role": "user", "content": prompts.EXTRACT_KEYWORDS_USER_PROMPT.format(text=text)}
@@ -309,7 +322,8 @@ def llm_extract_keywords(text, client: OpenAIClient, model="gpt-4o-mini"):
 def check_conversation_continuity(previous_page, current_page, client: OpenAIClient, model="gpt-4o-mini"):
     prev_user = previous_page.get("user_input", "") if previous_page else ""
     prev_agent = previous_page.get("agent_response", "") if previous_page else ""
-
+    model=os.environ.get("llm_model") or model
+
     user_prompt = prompts.CONTINUITY_CHECK_USER_PROMPT.format(
         prev_user=prev_user,
         prev_agent=prev_agent,
@@ -324,6 +338,7 @@ def check_conversation_continuity(previous_page, current_page, client: OpenAICli
     return response.strip().lower() == "true"

 def generate_page_meta_info(last_page_meta, current_page, client: OpenAIClient, model="gpt-4o-mini"):
+    model=os.environ.get("llm_model") or model
     current_conversation = f"User: {current_page.get('user_input', '')}\nAssistant: {current_page.get('agent_response', '')}"
     user_prompt = prompts.META_INFO_USER_PROMPT.format(
         last_meta=last_page_meta if last_page_meta else "None",
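
Taken together, the change means the model chosen at initialization now reaches every helper above, which is what the "fix changing models bug" title refers to. A usage sketch under assumptions: the import path and any constructor arguments not visible in this diff (for example the API key) are only illustrative:

    from memoryos import Memoryos

    memo = Memoryos(
        user_id="user_1",
        assistant_id="assistant_1",
        data_storage_path="./memoryos_data",
        llm_model="gpt-4o",  # exported to os.environ["llm_model"] in __init__
        # ...remaining required arguments omitted in this sketch...
    )
    # Helpers in utils.py now read os.environ["llm_model"] and use "gpt-4o"
    # instead of their hard-coded "gpt-4o-mini" defaults.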
