Mirror of https://github.com/marcogll/talia_bot.git (synced 2026-01-13 21:35:19 +00:00)
This commit finalizes Phase 4 of the project by implementing the LLM and scheduler integrations.

- Implements `get_smart_response` in `app/llm.py` to generate AI-powered responses using the OpenAI API.
- Implements a daily summary scheduler in `app/scheduler.py` using the `JobQueue` from `python-telegram-bot` for better integration with the application's event loop (a sketch of this piece follows the list below).
- Adds `get_events_for_day` to `app/calendar.py` to fetch daily events for the summary.
- Integrates the scheduler into the main application loop in `app/main.py`.
- Improves the date formatting in the daily summary for better readability.
- Updates `tasks.md` to reflect the completion of Phase 4.
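The commit page only shows `app/llm.py`, so the scheduler itself is not visible here. The following is a minimal sketch of what a `JobQueue`-based daily summary in `app/scheduler.py` might look like, under stated assumptions: the helper name `schedule_daily_summary`, the `SUMMARY_CHAT_ID` constant, the 08:00 send time, and the `get_events_for_day(date)` signature are all illustrative guesses, not taken from the repository.

# Hypothetical sketch of app/scheduler.py -- names, chat id, and send time are
# assumptions for illustration; only the JobQueue approach and the module/function
# names mentioned in the commit message come from the source.
import datetime

from telegram.ext import Application, ContextTypes

from app.calendar import get_events_for_day  # added in this commit per the message

SUMMARY_CHAT_ID = 123456789  # placeholder: the chat that should receive the summary


async def send_daily_summary(context: ContextTypes.DEFAULT_TYPE) -> None:
    """Job callback: fetch today's events and post a summary message."""
    today = datetime.date.today()
    events = get_events_for_day(today)  # assumed to return an iterable of events
    text = "\n".join(str(event) for event in events) or "No events scheduled today."
    await context.bot.send_message(chat_id=SUMMARY_CHAT_ID, text=text)


def schedule_daily_summary(application: Application) -> None:
    """Register the daily summary job on the application's JobQueue."""
    application.job_queue.run_daily(
        send_daily_summary,
        time=datetime.time(hour=8, minute=0),  # assumed send time
        name="daily_summary",
    )

Wiring this into `app/main.py` would then amount to calling `schedule_daily_summary(application)` before `application.run_polling()`, which keeps the job on the same event loop as the bot, matching the rationale given in the commit message.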
# app/llm.py
import openai

from app.config import OPENAI_API_KEY


def get_smart_response(prompt):
    """
    Generates a smart response using an LLM.
    """
    if not OPENAI_API_KEY:
        return "OpenAI API key not configured."

    openai.api_key = OPENAI_API_KEY

    try:
        # Legacy (pre-1.0) openai interface; openai.ChatCompletion.create was
        # removed in openai>=1.0, so this requires the older library version.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"An error occurred with OpenAI: {e}")
        return "I'm sorry, I couldn't generate a response right now."
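For context, a handler might call this helper roughly as sketched below. The handler name and wiring are assumptions for illustration only; they are not part of this commit.

# Hypothetical Telegram handler showing how get_smart_response could be used.
from telegram import Update
from telegram.ext import ContextTypes

from app.llm import get_smart_response


async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Reply to an incoming text message with an LLM-generated answer."""
    # Note: get_smart_response is synchronous, so this call blocks the event
    # loop while the OpenAI request is in flight; a production handler might
    # offload it to a thread (e.g. asyncio.to_thread).
    reply = get_smart_response(update.message.text)
    await update.message.reply_text(reply)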