"""
|
|
AI-powered classification and confidence scoring.
|
|
"""
|
|
import openai
|
|
import json
|
|
import logging
|
|
from typing import Dict, Any
|
|
|
|
from app.config import config
|
|
from app.ai.prompts import AUDITOR_PROMPT
|
|
from app.schema.base import ProvisionalExpense
|
|
|
|
# Configure the OpenAI client
|
|
openai.api_key = config.OPENAI_API_KEY
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
def classify_and_audit(expense: ProvisionalExpense) -> ProvisionalExpense:
    """
    Uses an AI model to audit an extracted expense, providing a confidence
    score and validation notes. This is a placeholder for more complex
    classification and validation logic.

    Args:
        expense: A ProvisionalExpense object with extracted data.

    Returns:
        The same ProvisionalExpense object, updated with the audit findings.
    """
    logger.info(f"Starting AI audit for expense: {expense.extracted_data.description}")

    # For now, this is a placeholder. A real implementation would call an
    # AI model, as the extractor does. For demonstration, we simply assign
    # a high confidence score.
    expense.confidence_score = 0.95
    expense.validation_notes.append("AI audit placeholder: auto-approved.")
    expense.processing_method = "ai_inference"  # Placeholder value; no model call is made yet.

    logger.info("AI audit placeholder complete.")

    return expense
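

# --- Illustrative sketch (not part of the original module) -------------------
# The placeholder above notes that a real implementation would call an AI
# model, as the extractor does. The helper below is a minimal, hedged sketch of
# what that call could look like with the legacy module-level `openai.api_key`
# configuration used above. The helper name, the model name, and the assumption
# that AUDITOR_PROMPT makes the model return JSON with "confidence_score" and
# "notes" keys are illustrative assumptions, not part of the existing codebase.
def _audit_with_ai_sketch(expense: ProvisionalExpense) -> ProvisionalExpense:
    """Hypothetical AI-backed audit; mirrors the structure of the placeholder above."""
    user_message = (
        "Audit this extracted expense and reply with JSON containing "
        '"confidence_score" (a float between 0 and 1) and "notes" (a list of strings):\n'
        f"{expense.extracted_data.description}"
    )
    response = openai.ChatCompletion.create(  # legacy (<1.0) client API, matching openai.api_key above
        model="gpt-4o-mini",  # assumed model; swap for whatever the project standardizes on
        messages=[
            {"role": "system", "content": AUDITOR_PROMPT},
            {"role": "user", "content": user_message},
        ],
        temperature=0,
    )
    try:
        audit: Dict[str, Any] = json.loads(response["choices"][0]["message"]["content"])
        expense.confidence_score = float(audit.get("confidence_score", 0.0))
        expense.validation_notes.extend(audit.get("notes", []))
        expense.processing_method = "ai_inference"
    except (json.JSONDecodeError, KeyError, TypeError, ValueError) as exc:
        logger.warning("AI audit sketch could not parse model output: %s", exc)
        expense.validation_notes.append("AI audit failed; manual review recommended.")
    return expense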