mirror of
https://github.com/marcogll/talia_bot.git
synced 2026-01-13 21:35:19 +00:00
feat: Implement LLM and scheduler modules
- Implement `llm.py` with OpenAI API integration for smart responses. - Implement `scheduler.py` to send a daily summary to the bot owner using `python-telegram-bot`'s `JobQueue`. - Integrate the scheduler into the main application. - Add `pytz` as a new dependency. - Update `tasks.md` to mark all tasks as complete.
This commit is contained in:
22
app/llm.py
22
app/llm.py
@@ -1,15 +1,23 @@
|
||||
# app/llm.py

import openai

from config import OPENAI_API_KEY


def get_smart_response(prompt):
    """
    Generate a smart response for *prompt* using the OpenAI chat API.

    Args:
        prompt: The user message to forward to the model.

    Returns:
        The model's reply as a stripped string, or a human-readable
        error message if the API key is missing or the API call fails.
        This function never raises: errors are reported as text so the
        bot can always send something back to the user.
    """
    if not OPENAI_API_KEY:
        return "Error: OpenAI API key is not configured."

    try:
        client = openai.OpenAI(api_key=OPENAI_API_KEY)
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
        )
        # message.content may be None in the v1 client (e.g. when the model
        # returns tool calls); guard before .strip() so we don't raise an
        # AttributeError that would surface as a misleading API error below.
        content = response.choices[0].message.content
        return content.strip() if content else ""
    except Exception as e:
        # Broad catch is deliberate: this is a bot-facing boundary and we
        # prefer returning an error string over crashing the handler.
        return f"An error occurred while communicating with OpenAI: {e}"
Reference in New Issue
Block a user