"""
Database logger for user-centric workflow execution logging.
"""

import logging
from datetime import datetime
from typing import Dict, Any, Optional, List
import uuid
import json
import io
from operator import itemgetter

from .database_connection import DatabaseConnection
from ..models.execution_context import ExecutionContext
from ..models.execution_result import ExecutionResult

logger = logging.getLogger(__name__)


class DatabaseLogger:
    """
    Database logger for tracking workflow executions, node executions, and user activities.
    Provides user-centric logging with comprehensive audit trails.
    """
    
    def __init__(self, db_connection: DatabaseConnection = None):
        """
        Initialize DatabaseLogger with database connection.
        
        Args:
            db_connection: DatabaseConnection instance (creates new if None)
        """
        self.db_connection = db_connection or DatabaseConnection()
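
    # Illustrative usage sketch (not part of the original source): the typical
    # logging lifecycle, assuming DatabaseConnection is configured elsewhere.
    #
    #   db_logger = DatabaseLogger()
    #   db_logger.log_workflow_start("exec-1", "wf-1", "user-1", {"input": "x"})
    #   node_db_id = db_logger.log_node_execution_start("exec-1", "n1", "http", "Fetch")
    #   db_logger.log_node_execution_completion(node_db_id, "completed", {"out": 1})
    #   db_logger.log_workflow_completion("exec-1", "completed", {"result": "ok"})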
    
    def log_workflow_start(self, 
                        execution_id: str,
                        workflow_id: str,
                        user_id: str,
                        context_data: Dict[str, Any] = None) -> None:
        """
        Log the start of a workflow execution.
        
        Args:
            execution_id: Unique execution identifier
            workflow_id: Workflow identifier
            user_id: User identifier
            context_data: Initial context data
        """
        try:
            query = """
            INSERT INTO workflow_executions 
            (execution_id, workflow_id, user_id, status, start_time, context_data)
            VALUES (%s, %s, %s, %s, %s, %s)
            """
            
            params = (
                execution_id,
                workflow_id,
                user_id,
                'started',
                datetime.utcnow(),
                json.dumps(context_data) if context_data else None
            )
            
            self.db_connection.execute_query(query, params)
            
            # Log user activity
            self.log_user_activity(
                user_id=user_id,
                activity_type='workflow_started',
                workflow_id=workflow_id,
                execution_id=execution_id,
                details={'context_data': context_data}
            )
            
            logger.info(f"Workflow start logged: execution_id={execution_id}, workflow_id={workflow_id}, user_id={user_id}")
            
        except Exception as e:
            logger.error(f"Failed to log workflow start: {e}")
            raise
    
    def log_workflow_completion(self,
                            execution_id: str,
                            status: str,
                            context_data: Dict[str, Any] = None,
                            error_message: str = None) -> None:
        """
        Log the completion of a workflow execution.
        
        Args:
            execution_id: Unique execution identifier
            status: Final status ('completed', 'failed', 'cancelled')
            context_data: Final context data
            error_message: Error message if status is 'failed'
        """
        try:
            query = """
            UPDATE workflow_executions 
            SET status = %s, end_time = %s, context_data = %s, error_message = %s
            WHERE execution_id = %s
            """
            
            params = (
                status,
                datetime.utcnow(),
                json.dumps(context_data) if context_data else None,
                error_message,
                execution_id
            )
            
            self.db_connection.execute_query(query, params)
            
            # Get workflow and user info for activity logging
            workflow_info = self.get_workflow_execution(execution_id)
            if workflow_info:
                self.log_user_activity(
                    user_id=workflow_info['user_id'],
                    activity_type=f'workflow_{status}',
                    workflow_id=workflow_info['workflow_id'],
                    execution_id=execution_id,
                    details={
                        'final_context': context_data,
                        'error_message': error_message
                    }
                )
            
            logger.info(f"Workflow completion logged: execution_id={execution_id}, status={status}")
            
        except Exception as e:
            logger.error(f"Failed to log workflow completion: {e}")
            raise
    
    def log_node_execution_start(self,
                                execution_id: str,
                                node_id: str,
                                node_type: str,
                                node_name: str = None,
                                input_data: Dict[str, Any] = None) -> int:
        """
        Log the start of a node execution.
        
        Args:
            execution_id: Workflow execution identifier
            node_id: Node identifier
            node_type: Type of node
            node_name: Human-readable node name
            input_data: Input data for the node
            
        Returns:
            Database ID of the created node execution record
        """
        try:
            query = """
            INSERT INTO node_executions 
            (execution_id, node_id, node_type, node_name, status, input_data, executed_at)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
            RETURNING id
            """
            
            params = (
                execution_id,
                node_id,
                node_type,
                node_name,
                'started',
                json.dumps(input_data) if input_data else None,
                datetime.utcnow()
            )
            
            node_execution_id = self.db_connection.insert_and_return_id(query, params)
            
            logger.debug(f"Node execution start logged: node_id={node_id}, type={node_type}, db_id={node_execution_id}")
            
            return node_execution_id
            
        except Exception as e:
            logger.error(f"Failed to log node execution start: {e}")
            raise
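
    # Illustrative sketch (assumed usage): capture the returned row id and pair
    # it with log_node_execution_completion, measuring elapsed milliseconds.
    # Uses the db_logger instance from the sketch near __init__.
    #
    #   import time
    #   started = time.monotonic()
    #   node_db_id = db_logger.log_node_execution_start("exec-1", "n1", "http")
    #   ...  # run the node
    #   elapsed_ms = int((time.monotonic() - started) * 1000)
    #   db_logger.log_node_execution_completion(node_db_id, "completed", execution_time_ms=elapsed_ms)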
    
    def log_node_execution_completion(self,
                                    node_execution_id: int,
                                    status: str,
                                    output_data: Dict[str, Any] = None,
                                    execution_time_ms: int = None,
                                    error_message: str = None) -> None:
        """
        Log the completion of a node execution.
        
        Args:
            node_execution_id: Database ID of the node execution record
            status: Final status ('completed', 'failed', 'skipped')
            output_data: Output data from the node
            execution_time_ms: Execution time in milliseconds
            error_message: Error message if status is 'failed'
        """
        try:
            query = """
            UPDATE node_executions 
            SET status = %s, output_data = %s, execution_time_ms = %s, error_message = %s
            WHERE id = %s
            """
            
            params = (
                status,
                json.dumps(output_data) if output_data else None,
                execution_time_ms,
                error_message,
                node_execution_id
            )
            
            self.db_connection.execute_query(query, params)
            
            logger.debug(f"Node execution completion logged: db_id={node_execution_id}, status={status}")
            
        except Exception as e:
            logger.error(f"Failed to log node execution completion: {e}")
            raise

    def log_user_pseudo_id(self,
                        workflow_id: str = None,
                        execution_id: str = None,
                        node_id: str = None,
                        user_pseudo_id: str = None,
                        status: str = None) -> int:
        """
        Log a single user pseudo ID entering a workflow node.
        
        Args:
            workflow_id: Workflow identifier
            execution_id: Workflow execution identifier
            node_id: Node identifier
            user_pseudo_id: User pseudo identifier
            status: Log status
            
        Returns:
            Database ID of the created log record
        """
        try:
            query = """
            INSERT INTO workflow_user_logs 
            (execution_id, node_id, workflow_id, user_pseudo_id, status, created_at, updated_at)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
            RETURNING id
            """
            now = datetime.utcnow()
            params = (execution_id, node_id, workflow_id, user_pseudo_id, status, now, now)
            
            row_id = self.db_connection.insert_and_return_id(query, params)
            
            logger.debug(f"User pseudo ID logged: node_id={node_id}, user_pseudo_id={user_pseudo_id}, db_id={row_id}")
            
            return row_id
            
        except Exception as e:
            logger.error(f"Failed to log user pseudo ID: {e}")
            raise
    
    def log_user_waiting_node(self,
                        workflow_id: str = None,
                        execution_id: str = None,
                        node_id: str = None,
                        user_pseudo_id: str = None,
                        waiting_type: str = None,
                        condition: str = None,
                        wait_until: str = None,
                        status: str = None,
                        is_check: int = 0,
                        executed_at: str = None) -> int:
        """
        Log a user held at a wait node.
        
        Args:
            workflow_id: Workflow identifier
            execution_id: Workflow execution identifier
            node_id: Node identifier
            user_pseudo_id: User pseudo identifier
            waiting_type: Type of wait
            condition: Wait condition expression
            wait_until: Timestamp string the user waits until
            status: Log status
            is_check: Whether the wait has been checked (0 or 1)
            executed_at: Timestamp string of node execution
            
        Returns:
            Database ID of the created waiting record
        """
        try:
            # NOTE: table name 'workflow_user_wating' matches the existing schema spelling.
            query = """
            INSERT INTO workflow_user_wating 
            (execution_id, workflow_id, node_id, user_pseudo_id, waiting_type, condition, wait_until, status, is_check, executed_at)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
            RETURNING id
            """
            params = (execution_id, workflow_id, node_id, user_pseudo_id, waiting_type, condition, wait_until, status, is_check, executed_at)
            
            row_id = self.db_connection.insert_and_return_id(query, params)
            
            return row_id
            
        except Exception as e:
            logger.error(f"Failed to log user waiting node: {e}")
            raise
    
    def log_user_pseudo_ids(self,
                        execution_id: str = None,
                        workflow_id: str = None,
                        node_id: str = None,
                        user_pseudo_ids: List[str] = None,
                        status: str = None) -> List[int]:
        """
        Batch-log a list of user pseudo IDs for a workflow node.
        
        Args:
            execution_id: Workflow execution identifier
            workflow_id: Workflow identifier
            node_id: Node identifier
            user_pseudo_ids: User pseudo identifiers to log
            status: Log status applied to every row
            
        Returns:
            List of database IDs of the created log records
        """
        try:
            if not user_pseudo_ids:
                return []

            query = """
            INSERT INTO workflow_user_logs 
            (execution_id, node_id, workflow_id, user_pseudo_id, status, created_at, updated_at)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
            RETURNING id
            """

            now = datetime.utcnow()

            # Build one parameter tuple per user, matching the positional
            # placeholders in the query above.
            values = [
                (execution_id, node_id, workflow_id, uid, status, now, now)
                for uid in user_pseudo_ids
            ]

            # psycopg2: execute_values is the usual batch-insert path;
            # adjust depending on your DB wrapper.
            inserted_ids = self.db_connection.insert_many_and_return_ids(query, values)

            return inserted_ids

        except Exception as e:
            logger.error(f"Failed to batch log user pseudo IDs: {e}")
            raise
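
    # Illustrative sketch: logging a batch of pseudo IDs in one round trip,
    # assuming insert_many_and_return_ids wraps a batch insert such as
    # psycopg2's execute_values.
    #
    #   ids = db_logger.log_user_pseudo_ids(
    #       execution_id="exec-1", workflow_id="wf-1", node_id="n1",
    #       user_pseudo_ids=["u1", "u2", "u3"], status="entered")
    #   assert len(ids) == 3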

    def log_user_pseudo_ids_copy(self,
                                workflow_id: str = None,
                                execution_id: str = None,
                                node_id: str = None,
                                user_pseudo_ids: list[str] = None,
                                status: str = None) -> bool:
        """
        Batch-log user pseudo IDs via PostgreSQL COPY for large volumes.
        
        Returns:
            True on success, False when there is nothing to insert
        """
        try:
            if not user_pseudo_ids:
                return False

            now = datetime.utcnow()

            def esc(value) -> str:
                # COPY text format: NULL is \N; backslash, tab and newline
                # must be escaped so they cannot break rows or columns.
                if value is None:
                    return r"\N"
                return str(value).replace("\\", "\\\\").replace("\t", "\\t").replace("\n", "\\n")

            # Prepare a tab-separated buffer for COPY (tab is the default
            # delimiter of the text format)
            buffer = io.StringIO()
            for uid in user_pseudo_ids:
                row = (execution_id, node_id, workflow_id, uid, status, now.isoformat(), now.isoformat())
                buffer.write("\t".join(esc(col) for col in row) + "\n")
            buffer.seek(0)

            copy_sql = """
                COPY workflow_user_logs
                (execution_id, node_id, workflow_id, user_pseudo_id, status, created_at, updated_at)
                FROM STDIN WITH (FORMAT text)
            """
            raw_conn = self.db_connection.pool.raw_connection()
            try:
                cur = raw_conn.cursor()
                try:
                    # Assumes a psycopg2 raw connection, which exposes COPY via
                    # copy_expert; pg8000 instead takes a stream kwarg:
                    # cur.execute(copy_sql, stream=buffer)
                    cur.copy_expert(copy_sql, buffer)
                finally:
                    cur.close()
                raw_conn.commit()
            finally:
                raw_conn.close()

            return True

        except Exception as e:
            logger.error(f"Failed to batch log user pseudo IDs via COPY: {e}")
            raise
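
    # Illustrative note: COPY streams all rows in a single protocol exchange and
    # is the fastest bulk-insert path for large batches; the values are escaped
    # above because COPY's text format treats backslash, tab and newline as
    # structure.
    #
    #   db_logger.log_user_pseudo_ids_copy(
    #       workflow_id="wf-1", execution_id="exec-1", node_id="n1",
    #       user_pseudo_ids=[f"u{i}" for i in range(100_000)], status="entered")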
        
    def log_user_activity(self,
                        user_id: str,
                        activity_type: str,
                        workflow_id: str = None,
                        execution_id: str = None,
                        details: Dict[str, Any] = None) -> None:
        """
        Log user activity for audit trails.
        
        Args:
            user_id: User identifier
            activity_type: Type of activity
            workflow_id: Related workflow identifier
            execution_id: Related execution identifier
            details: Additional activity details
        """
        try:
            query = """
            INSERT INTO user_activity_logs 
            (user_id, activity_type, workflow_id, execution_id, details, timestamp)
            VALUES (%s, %s, %s, %s, %s, %s)
            """
            
            params = (
                user_id,
                activity_type,
                workflow_id,
                execution_id,
                json.dumps(details) if details else None,
                datetime.utcnow()
            )
            
            self.db_connection.execute_query(query, params)
            
            logger.debug(f"User activity logged: user_id={user_id}, activity={activity_type}")
            
        except Exception as e:
            logger.error(f"Failed to log user activity: {e}")
            # Don't raise for activity logging failures to avoid disrupting workflow
    
    def get_workflow_execution(self, execution_id: str) -> Optional[Dict[str, Any]]:
        """
        Get workflow execution details by execution ID.
        
        Args:
            execution_id: Execution identifier
            
        Returns:
            Workflow execution details or None if not found
        """
        try:
            query = """
            SELECT execution_id, workflow_id, user_id, status, start_time, end_time, 
                context_data, error_message, created_at, updated_at
            FROM workflow_executions 
            WHERE execution_id = %s
            """
            
            result = self.db_connection.execute_query(query, (execution_id,), fetch=True)
            
            if result:
                execution = dict(result[0])
                # Parse JSON fields
                if execution['context_data']:
                    execution['context_data'] = json.loads(execution['context_data'])
                return execution
            
            return None
            
        except Exception as e:
            logger.error(f"Failed to get workflow execution: {e}")
            raise
    
    def get_workflow_executions_by_user(self,
                                    user_id: str,
                                    workflow_id: str = None,
                                    status: str = None,
                                    limit: int = 100,
                                    offset: int = 0) -> List[Dict[str, Any]]:
        """
        Get workflow executions for a specific user with optional filtering.
        
        Args:
            user_id: User identifier
            workflow_id: Optional workflow filter
            status: Optional status filter
            limit: Maximum number of results
            offset: Result offset for pagination
            
        Returns:
            List of workflow execution records
        """
        try:
            base_query = """
            SELECT execution_id, workflow_id, user_id, status, start_time, end_time, 
                   context_data, error_message, created_at, updated_at
            FROM workflow_executions 
            WHERE user_id = %s
            """
            
            params = [user_id]
            
            if workflow_id:
                base_query += " AND workflow_id = %s"
                params.append(workflow_id)
            
            if status:
                base_query += " AND status = %s"
                params.append(status)
            
            base_query += " ORDER BY start_time DESC LIMIT %s OFFSET %s"
            params.extend([limit, offset])
            
            results = self.db_connection.execute_query(base_query, tuple(params), fetch=True)
            
            executions = []
            for result in results:
                execution = dict(result)
                # Parse JSON fields
                if execution['context_data']:
                    execution['context_data'] = json.loads(execution['context_data'])
                executions.append(execution)
            
            return executions
            
        except Exception as e:
            logger.error(f"Failed to get workflow executions by user: {e}")
            raise
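
    # Illustrative sketch: paging through a user's executions with limit/offset.
    # process() is a hypothetical handler.
    #
    #   offset = 0
    #   while True:
    #       page = db_logger.get_workflow_executions_by_user("user-1", limit=100, offset=offset)
    #       if not page:
    #           break
    #       process(page)
    #       offset += 100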
    
    def get_node_executions_by_workflow(self, execution_id: str) -> List[Dict[str, Any]]:
        """
        Get all node executions for a specific workflow execution.
        
        Args:
            execution_id: Workflow execution identifier
            
        Returns:
            List of node execution records
        """
        try:
            query = """
            SELECT id, execution_id, node_id, node_type, node_name, status, 
                input_data, output_data, execution_time_ms, error_message, executed_at
            FROM node_executions 
            WHERE execution_id = %s
            ORDER BY executed_at ASC
            """
            
            results = self.db_connection.execute_query(query, (execution_id,), fetch=True)
            
            node_executions = []
            for result in results:
                node_execution = dict(result)
                # Parse JSON fields
                if node_execution['input_data']:
                    node_execution['input_data'] = json.loads(node_execution['input_data'])
                if node_execution['output_data']:
                    node_execution['output_data'] = json.loads(node_execution['output_data'])
                node_executions.append(node_execution)
            
            return node_executions
            
        except Exception as e:
            logger.error(f"Failed to get node executions by workflow: {e}")
            raise
    
    def get_user_activity_logs(self,
                            user_id: str,
                            activity_type: str = None,
                            workflow_id: str = None,
                            limit: int = 100,
                            offset: int = 0) -> List[Dict[str, Any]]:
        """
        Get user activity logs with optional filtering.
        
        Args:
            user_id: User identifier
            activity_type: Optional activity type filter
            workflow_id: Optional workflow filter
            limit: Maximum number of results
            offset: Result offset for pagination
            
        Returns:
            List of user activity log records
        """
        try:
            base_query = """
            SELECT id, user_id, activity_type, workflow_id, execution_id, details, timestamp
            FROM user_activity_logs 
            WHERE user_id = %s
            """
            
            params = [user_id]
            
            if activity_type:
                base_query += " AND activity_type = %s"
                params.append(activity_type)
            
            if workflow_id:
                base_query += " AND workflow_id = %s"
                params.append(workflow_id)
            
            base_query += " ORDER BY timestamp DESC LIMIT %s OFFSET %s"
            params.extend([limit, offset])
            
            results = self.db_connection.execute_query(base_query, tuple(params), fetch=True)
            
            activities = []
            for result in results:
                activity = dict(result)
                # Parse JSON fields
                if activity['details']:
                    activity['details'] = json.loads(activity['details'])
                activities.append(activity)
            
            return activities
            
        except Exception as e:
            logger.error(f"Failed to get user activity logs: {e}")
            raise
    
    def get_execution_statistics(self, user_id: str = None, workflow_id: str = None) -> Dict[str, Any]:
        """
        Get execution statistics with optional filtering.
        
        Args:
            user_id: Optional user filter
            workflow_id: Optional workflow filter
            
        Returns:
            Dictionary with execution statistics
        """
        try:
            base_query = """
            SELECT 
                COUNT(*) as total_executions,
                COUNT(CASE WHEN status = 'completed' THEN 1 END) as completed_executions,
                COUNT(CASE WHEN status = 'failed' THEN 1 END) as failed_executions,
                COUNT(CASE WHEN status = 'running' THEN 1 END) as running_executions,
                AVG(EXTRACT(EPOCH FROM (end_time - start_time))) as avg_execution_time_seconds
            FROM workflow_executions 
            WHERE 1=1
            """
            
            params = []
            
            if user_id:
                base_query += " AND user_id = %s"
                params.append(user_id)
            
            if workflow_id:
                base_query += " AND workflow_id = %s"
                params.append(workflow_id)
            
            result = self.db_connection.execute_query(base_query, tuple(params), fetch=True)
            
            if result:
                stats = dict(result[0])
                # Convert avg_execution_time_seconds to float if not None
                if stats['avg_execution_time_seconds']:
                    stats['avg_execution_time_seconds'] = float(stats['avg_execution_time_seconds'])
                return stats
            
            return {
                'total_executions': 0,
                'completed_executions': 0,
                'failed_executions': 0,
                'running_executions': 0,
                'avg_execution_time_seconds': None
            }
            
        except Exception as e:
            logger.error(f"Failed to get execution statistics: {e}")
            raise
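
    # Illustrative sketch: deriving a success rate from the returned counters.
    #
    #   stats = db_logger.get_execution_statistics(user_id="user-1")
    #   total = stats["total_executions"]
    #   rate = stats["completed_executions"] / total if total else 0.0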
    
    def add_new_automation(self, payload: dict):
        """
        Register a new automation from an API payload.
        
        Args:
            payload: Dict with property_id, automation_id, automation_name
                and current_version keys
        
        Returns:
            Database ID of the created automation_context row
        """
        try:
            property_id = payload.get("property_id")
            automation_id = payload.get("automation_id")
            automation_name = payload.get("automation_name")
            current_version = int(payload.get("current_version"))
            status = 'inactive'
            path_to_json_flow = f"account/{property_id}/automation_v2/{automation_id}"

            query = """
            INSERT INTO automation_context 
            (property_id, automation_id, automation_name, current_version, status, path_to_json_flow)
            VALUES (%s, %s, %s, %s, %s, %s)
            """
            params = (property_id, automation_id, automation_name, current_version, status, path_to_json_flow)
            
            row_id = self.db_connection.insert_row(query, params)
            logger.debug(f"New automation added: automation_id={automation_id}, db_id={row_id}")
            
            return row_id
        
        except Exception as e:
            logger.error(f"Failed to add new automation: {e}")
            raise

    def generate_trigger(self, property_id: str, automation_id: str, version: int, trigger_list: list):
        """
        Create one scheduled trigger row per datetime in trigger_list.
        """
        query = """
            INSERT INTO trigger_manager 
            (property_id, execution_id, datetime_trigger, automation_id, version, path_to_json_flow, start_time, type)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """
        path_to_json_flow = f"account/{property_id}/automation_v2/{automation_id}"
        for time_trigger in trigger_list:
            params = (property_id, str(uuid.uuid4()), time_trigger, automation_id, version, path_to_json_flow, time_trigger, 'schedule')
            self.db_connection.insert_row(query, params)
        
        return True
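
    # Illustrative sketch: building a week of daily triggers for
    # generate_trigger, assuming naive UTC datetimes formatted the way
    # datetime_trigger is stored.
    #
    #   from datetime import timedelta
    #   start = datetime.utcnow().replace(hour=9, minute=0, second=0, microsecond=0)
    #   triggers = [(start + timedelta(days=d)).strftime("%Y-%m-%d %H:%M:%S") for d in range(7)]
    #   db_logger.generate_trigger("prop-1", "auto-1", 1, triggers)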
    
    def generate_trigger_manual(self, property_id: str, automation_id: str, execution_id: str, version: int):
        """
        Create a single manually-fired trigger row stamped with the current UTC time.
        """
        query = """
            INSERT INTO trigger_manager 
            (property_id, execution_id, datetime_trigger, automation_id, version, path_to_json_flow, start_time, type)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """
        now = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        params = (property_id, execution_id, now, automation_id, version, f"account/{property_id}/automation_v2/{automation_id}", now, 'manual')
        self.db_connection.insert_row(query, params)
        
        return True

    def update_workflow_status(self, property_id: str, automation_id: str, automation_status: int = 0):
        """
        Update automation status in trigger_manager and automation_context.
        
        Maps automation_status 1 -> 'active', 2 -> 'archived', else 'created'.
        The interpolated values are internal identifiers, not user input.
        """
        query1 = f"""
            UPDATE trigger_manager
            SET automation_status = {automation_status}
            WHERE property_id='{property_id}' and automation_id='{automation_id}';
        """
        status = 'active' if automation_status == 1 else (
            'archived' if automation_status == 2 else 'created'
        )
        query2 = f"""
            UPDATE automation_context
            SET status = '{status}'
            WHERE property_id='{property_id}' and automation_id='{automation_id}';
        """

        self.db_connection.update(query1)
        self.db_connection.update(query2)
    
    def get_active_workflow(self, now, timezone: str = 'Asia/Bangkok'):
        """
        Fetch pending, active triggers whose datetime_trigger matches `now`.
        
        Note: `timezone` is currently unused.
        """
        query = f"""
        SELECT property_id, automation_id, execution_id, path_to_json_flow
        FROM trigger_manager
        WHERE automation_status = 1 and status = 'pending' AND datetime_trigger = '{now}';
        """
        result = self.db_connection.query(query)

        return result
    
    def get_running_stage_unique_node_context(self):
        """
        Get distinct (automation, execution, node) combinations that are
        still running (status = 0) and not yet moved.
        """
        query = """
        SELECT property_id, automation_id, execution_id, current_node_id, current_node_type, next_node_id, next_node_type
        FROM user_pseudo_running_stage
        WHERE status = 0 and is_moved = 0
        GROUP BY 1,2,3,4,5,6,7
        """
        result = self.db_connection.query(query)

        return result
    
    def get_running_stage_context(self):
        """
        Get all running-stage rows that are still running and not yet moved.
        """
        query = """
        SELECT *
        FROM user_pseudo_running_stage
        WHERE status = 0 and is_moved = 0
        """
        result = self.db_connection.query(query)

        return result
    
    def get_waiting_stage_context(self):
        """
        Get running-stage rows currently held at a wait node (status = 3).
        """
        query = """
        SELECT *
        FROM user_pseudo_running_stage
        WHERE status = 3 and current_node_type = 'wait'
        """
        result = self.db_connection.query(query)

        return result
    
    def update_work_flow_status(self, property_id, automation_id, status):
        """
        Update automation_context.status from a numeric/boolean status code.
        """
        # 0/False -> inactive, 1/True -> active, 3 -> archived.
        # Fall back to 'inactive' so str_status is always defined.
        if status == 3:
            str_status = 'archived'
        elif status == 1:
            str_status = 'active'
        else:
            str_status = 'inactive'
            
        query = f"""
        UPDATE automation_context
        SET status = '{str_status}'
        WHERE property_id = '{property_id}' AND automation_id = '{automation_id}';
        """
        result = self.db_connection.update(query)
        return result        
    
    def update_trigger_mannager(self, property_id, automation_id, status):
        """
        Set automation_status on all pending trigger_manager rows.
        """
        status = 1 if status in (1, True) else 0
        query = f"""
        UPDATE trigger_manager
        SET automation_status = {status}
        WHERE property_id = '{property_id}' AND automation_id = '{automation_id}' AND status = 'pending';
        """
        result = self.db_connection.update(query)
        return result
    
    def update_trigger_mannager_status(self, property_id, automation_id, execution_id, status, endtime=None):
        """
        Update a trigger's status, optionally setting its end_time.
        """
        end_time_clause = f", end_time = '{endtime}'" if endtime else ""
        query = f"""
        UPDATE trigger_manager
        SET status = '{status}'{end_time_clause}
        WHERE property_id = '{property_id}' AND automation_id = '{automation_id}' AND execution_id = '{execution_id}';
        """
        result = self.db_connection.update(query)
        return result
    
    def add_data_to_running_stage(self, data: dict) -> bool:
        """
        Insert a user's current position in a workflow into the running stage.
        
        Required keys: automation_id, execution_id, user_pseudo_id,
        current_node_id, current_node_type, current_node_json, next_node_id,
        next_node_type, next_node_json. Remaining keys are optional.
        """
        property_id = data.get("property_id")
        automation_id = data["automation_id"]
        execution_id = data["execution_id"]
        user_pseudo_id = data["user_pseudo_id"]
        current_node_id = data['current_node_id']
        current_node_type = data['current_node_type']
        current_node_json = data['current_node_json']
        next_node_id = data["next_node_id"]
        next_node_type = data["next_node_type"]
        next_node_json = data['next_node_json']
        wait_until = data.get("wait_until", '-')
        input_data = data.get("input_data", {})
        status = data.get("status", 0)
        is_moved = data.get("is_moved", 0)
        execution_time_ms = data.get("execution_time_ms", 0)
        error_message = data.get("error_message")
        
        # Serialize dict payloads; most drivers cannot adapt a raw dict.
        if isinstance(input_data, dict):
            input_data = json.dumps(input_data)
        
        query = """
            INSERT INTO user_pseudo_running_stage 
            (property_id, automation_id, execution_id, user_pseudo_id, current_node_id, current_node_type, current_node_json, next_node_id, next_node_type, next_node_json, wait_until, input_data, status, is_moved, execution_time_ms, error_message)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        params = (property_id, automation_id, execution_id, user_pseudo_id, current_node_id, current_node_type, current_node_json, next_node_id, next_node_type, next_node_json, wait_until, input_data, status, is_moved, execution_time_ms, error_message)
        self.db_connection.insert_row(query, params)
    
        return True
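
    # Illustrative sketch of the expected payload (field values assumed):
    #
    #   db_logger.add_data_to_running_stage({
    #       "property_id": "prop-1", "automation_id": "auto-1",
    #       "execution_id": "exec-1", "user_pseudo_id": "u1",
    #       "current_node_id": "n1", "current_node_type": "trigger",
    #       "current_node_json": "{}", "next_node_id": "n2",
    #       "next_node_type": "email", "next_node_json": "{}",
    #   })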
    
    def update_user_status_running_stage(self, execution_id:str, node_id:str, user_pseudo_id:str, status:int):
        query = f"""
        UPDATE user_pseudo_running_stage
        SET status = {status}
        WHERE execution_id = '{execution_id}' AND current_node_id = '{node_id}' AND user_pseudo_id = '{user_pseudo_id}';
        """
        self.db_connection.update(query)
    
    def update_user_status_running_stage_wait_node(self, execution_id:str, current_node_id:str, user_pseudo_id:str, status:int, next_node_id:str, next_node_type:str, wait_until:str):
        query = f"""
        UPDATE user_pseudo_running_stage
        SET 
            status = {status},
            current_node_id = '{current_node_id}',
            current_node_type = 'wait',
            next_node_id = '{next_node_id}',
            next_node_type = '{next_node_type}',
            wait_until = '{wait_until}'

        WHERE execution_id = '{execution_id}' AND user_pseudo_id = '{user_pseudo_id}';
        """
        self.db_connection.update(query)
    
    def log_node_context(self, data: dict) -> bool:
        """
        Log per-user node execution context.
        
        Required keys: automation_id, execution_id, user_pseudo_id, node_id,
        node_type, node_name. Remaining keys are optional.
        """
        property_id = data.get("property_id")
        automation_id = data["automation_id"]
        execution_id = data["execution_id"]
        user_pseudo_id = data["user_pseudo_id"]
        node_id = data['node_id']
        node_type = data['node_type']
        node_name = data['node_name']
        input_data = data.get('input_data', {})
        output_data = data.get('output_data', {})
        status = data.get('status', None)
        execution_time_ms = data.get('execution_time_ms', 0)
        error_message = data.get('error_message', '-')
        execution_time = data.get("execution_time", datetime.now().isoformat())

        # Serialize dict payloads; most drivers cannot adapt a raw dict.
        if isinstance(input_data, dict):
            input_data = json.dumps(input_data)
        if isinstance(output_data, dict):
            output_data = json.dumps(output_data)

        query = """
            INSERT INTO node_context 
            (property_id, automation_id, execution_id, user_pseudo_id, node_id, node_type, node_name, input_data, output_data, status, execution_time_ms, error_message, execution_time)
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """
        params = (property_id, automation_id, execution_id, user_pseudo_id, node_id, node_type, node_name, input_data, output_data, status, execution_time_ms, error_message, execution_time)
        self.db_connection.insert_row(query, params)
    
        return True
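
    # Illustrative sketch of a minimal node_context payload:
    #
    #   db_logger.log_node_context({
    #       "automation_id": "auto-1", "execution_id": "exec-1",
    #       "user_pseudo_id": "u1", "node_id": "n1",
    #       "node_type": "email", "node_name": "Send welcome email",
    #       "status": "completed", "execution_time_ms": 42,
    #   })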
    
    def update_running_stage(self, data: dict) -> None:
        """
        Placeholder: advance a user's running-stage row to the next node.
        
        Expected keys: is_end_node, current_node_type, current_node_json,
        next_node_id, next_node_type, next_node_json, wait_until, input_data,
        status, is_moved, execution_time_ms.
        """
        # TODO: not yet implemented
        pass
    
    def get_automation_execution(self, property_id: str, automation_id: str, execution_id: str = 'all', timezone: str = 'Asia/Bangkok'):
        """
        Get non-pending trigger rows for an automation, with datetime_trigger
        converted to the given timezone. Pass execution_id='all' for all runs.
        """
        execution_id = None if execution_id == 'all' else execution_id
        query = f"""
        SELECT
            CAST(datetime_trigger AT TIME ZONE 'UTC' AT TIME ZONE '{timezone}' AS TEXT) AS datetime_trigger,
            property_id,
            automation_id,
            CAST(execution_id AS TEXT) AS execution_id,
            type
        FROM trigger_manager
        WHERE property_id = '{property_id}'
            AND automation_id = '{automation_id}'
            AND status != 'pending'
        """
        if execution_id:
            query += f" AND execution_id = '{execution_id}'"
        result = self.db_connection.query(query)
        return result

    def get_execution_data(self, property_id: str, automation_id: str, execution_id: str = "all"):
        # 1. Prepare the WHERE clause (reused for both queries)
        where_clause = f"""
            property_id = '{property_id}'
            AND automation_id = '{automation_id}'
        """

        if execution_id != "all":
            where_clause += f" AND execution_id = '{execution_id}'"

        # 2. Fetch Node Data (Logs)
        query_nodes = f"""
            SELECT *
            FROM node_context
            WHERE {where_clause};
        """
        node_results = self.db_connection.query(query_nodes)

        # 3. Fetch Execution Context Data
        # NOTE: Added execution_id and datetime_trigger to SELECT to ensure we can map them
        query_context = f"""
            SELECT 
                CAST(execution_id AS TEXT) as execution_id,
                automation_id, 
                type, 
                status, 
                CAST(start_time AS TEXT) as start_time, 
                CAST(end_time AS TEXT) as end_time,
                CAST(datetime_trigger AS TEXT) as datetime_trigger,
                version
            FROM trigger_manager
            WHERE {where_clause};
        """
        context_results = self.db_connection.query(query_context)
        
        # 4. Create Context Map (O(1) Lookup)
        context_map = {str(item['execution_id']): item for item in context_results}

        # 5. Build the Hierarchy
        grouped_data_mapping = {}

        for entry in node_results:
            # Normalize IDs to strings to ensure matching
            auth_id = str(entry.get('automation_id'))
            exec_id = str(entry.get('execution_id'))
            node_id = str(entry.get('node_id'))
            
            # --- Level 1: Automation ID ---
            if auth_id not in grouped_data_mapping:
                grouped_data_mapping[auth_id] = {}
                
            # --- Level 2: Execution ID (Enriched with Context) ---
            if exec_id not in grouped_data_mapping[auth_id]:
                # Retrieve context from our map, default to empty dict if missing
                context_info = context_map.get(exec_id, {})
                
                grouped_data_mapping[auth_id][exec_id] = {
                    "context_details": {
                        "type": context_info.get("type"),
                        "trigger_datetime": context_info.get("datetime_trigger"),
                        "start_time": context_info.get("start_time"),
                        "end_time": context_info.get("end_time"),
                        "status": context_info.get("status"),
                        "version": context_info.get("version")
                    },
                    "nodes": {}
                }
                
            # --- Level 3: Node ID ---
            nodes_dict = grouped_data_mapping[auth_id][exec_id]["nodes"]
            
            if node_id not in nodes_dict:
                nodes_dict[node_id] = {
                    "node_type": entry.get("node_type"),
                    "node_name": entry.get("node_name"),
                    "user_executions": []
                }
            
            execution_time = entry.get("execution_time")

            # --- Level 4: User Execution Details ---
            user_detail = {
                "user_pseudo_id": str(entry.get("user_pseudo_id")),
                "status": entry.get("status"),
                "execution_time": execution_time.strftime(format='%Y-%m-%d %H:%M:%S'),
                "execution_time_ms": entry.get("execution_time_ms"),
                "error_message": entry.get("error_message"),
                "input_data": entry.get("input_data"),
                "output_data": entry.get("output_data")
            }
            
            nodes_dict[node_id]["user_executions"].append(user_detail)

        # 6. Sort user executions by time (Optional but recommended)
        for auth in grouped_data_mapping.values():
            for exec_obj in auth.values():
                for node in exec_obj["nodes"].values():
                    node["user_executions"].sort(key=itemgetter("execution_time"))

        return grouped_data_mapping
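
    # Illustrative sketch: walking the automation -> execution -> node -> user
    # hierarchy returned by get_execution_data.
    #
    #   data = db_logger.get_execution_data("prop-1", "auto-1")
    #   for automation_id, executions in data.items():
    #       for execution_id, payload in executions.items():
    #           ctx = payload["context_details"]
    #           for node_id, node in payload["nodes"].items():
    #               print(automation_id, execution_id, node_id, len(node["user_executions"]))
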
    def clear_user_from_running_stage(self, property_id: str, automation_id: str, execution_id: str):
        """
        Delete all running-stage rows for an execution.
        """
        query = f"""
        DELETE FROM user_pseudo_running_stage
            WHERE property_id = '{property_id}'
            AND automation_id = '{automation_id}'
            AND execution_id = '{execution_id}'
        """
        result = self.db_connection.query(query, fetch='delete')
        return result