Mirror of https://github.com/marcogll/talia_bot.git (synced 2026-01-13 21:35:19 +00:00)
feat: Implement LLM and scheduler functionalities
This commit finalizes Phase 4 of the project by implementing the LLM and scheduler integrations.

- Implements `get_smart_response` in `app/llm.py` to generate AI-powered responses using the OpenAI API.
- Implements a daily summary scheduler in `app/scheduler.py` using the `JobQueue` from `python-telegram-bot` for better integration with the application's event loop (see the sketch after this list).
- Adds `get_events_for_day` to `app/calendar.py` to fetch daily events for the summary.
- Integrates the scheduler into the main application loop in `app/main.py`.
- Improves the date formatting in the daily summary for better readability.
- Updates `tasks.md` to reflect the completion of Phase 4.
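Only `app/llm.py` appears in the diff below, so here is a minimal sketch of how the `JobQueue`-based daily summary described above might be wired, assuming python-telegram-bot v20+ installed with the `job-queue` extra. The names `setup_scheduler`, `send_daily_summary`, and `DAILY_SUMMARY_CHAT_ID`, the 08:00 send time, and the assumption that `get_events_for_day` accepts a `datetime.date` are all illustrative, not the repository's actual code.

```python
# Illustrative sketch only: helper names and send time are assumptions, not repo code.
import datetime

from telegram.ext import Application, ContextTypes

from app.calendar import get_events_for_day      # added in this commit
from app.config import DAILY_SUMMARY_CHAT_ID     # assumed config constant


async def send_daily_summary(context: ContextTypes.DEFAULT_TYPE) -> None:
    """JobQueue callback: build and send the summary of today's events."""
    today = datetime.date.today()
    events = get_events_for_day(today)
    lines = [f"Summary for {today.strftime('%A, %d %B %Y')}:"]
    if events:
        lines += [f"- {event}" for event in events]
    else:
        lines.append("- No events today.")
    await context.bot.send_message(chat_id=context.job.data, text="\n".join(lines))


def setup_scheduler(application: Application) -> None:
    """Register the daily job on the bot's own JobQueue (same event loop)."""
    application.job_queue.run_daily(
        send_daily_summary,
        time=datetime.time(hour=8, minute=0),  # assumed send time
        data=DAILY_SUMMARY_CHAT_ID,            # forwarded to context.job.data
        name="daily_summary",
    )
```

In `app/main.py` this would amount to calling `setup_scheduler(application)` once after building the `Application` and before `run_polling()`, which is what lets the job share the bot's event loop instead of needing a separate scheduler thread.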
Changed file: app/llm.py (21 changed lines)
@@ -1,15 +1,26 @@
 # app/llm.py
 
-from config import OPENAI_API_KEY
+import openai
+from app.config import OPENAI_API_KEY
 
 def get_smart_response(prompt):
     """
     Generates a smart response using an LLM.
     """
 
     if not OPENAI_API_KEY:
         return "OpenAI API key not configured."
 
-    print(f"Generating smart response for: {prompt}")
-    # TODO: Implement OpenAI API integration
-    return "This is a smart response."
+    openai.api_key = OPENAI_API_KEY
+    try:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": prompt},
+            ],
+        )
+        return response.choices[0].message.content.strip()
+    except Exception as e:
+        print(f"An error occurred with OpenAI: {e}")
+        return "I'm sorry, I couldn't generate a response right now."
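For completeness, a hedged sketch of how the new `get_smart_response` might be called from a message handler, assuming python-telegram-bot v20+; the handler name and registration line are illustrative, and the diff relies on `openai.ChatCompletion.create`, i.e. the pre-1.0 `openai` package.

```python
# Hypothetical usage of get_smart_response() from a bot handler; not part of this diff.
import asyncio

from telegram import Update
from telegram.ext import ContextTypes, MessageHandler, filters

from app.llm import get_smart_response


async def handle_text(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    # get_smart_response() makes a blocking HTTP call, so run it in a worker
    # thread to keep the bot's event loop responsive.
    reply = await asyncio.to_thread(get_smart_response, update.message.text)
    await update.message.reply_text(reply)


# Registered on the Application, e.g.:
# application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, handle_text))
```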