# app.py
from flask import Flask, request, render_template, jsonify, session, redirect, url_for, flash
import os
import uuid
import json
import time
import base64
import requests
import threading  # Added for background analysis
from datetime import datetime, timedelta, time as dt_time
from werkzeug.utils import secure_filename
from dotenv import load_dotenv

# ⚡ CRITICAL: Load environment variables FIRST, before any other imports that need them
load_dotenv('config/.env')

# Security imports
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
from flask_wtf.csrf import CSRFProtect
from extensions import limiter  # Import limiter from extensions to avoid circular imports
import secrets
import re

# Now import everything else that might need environment variables
from improved_document_processor import ImprovedDocumentProcessor
import openai
from summarization_routes import summarization_bp
from PIL import Image
import io

# Subscription-related imports (these need env vars to be loaded first)
from config.stripe_config import DEFAULT_PLANS
from services.stripe_service import init_stripe
from subscription_routes import subscription_bp, check_usage_limit

# Other imports
from document_summarization_processor import DocumentSummarizationProcessor
from citation_generator import process_analysis_with_citations

# Import database models and functions
from database import db, init_db, shutdown_session
# 🔥 FIXED: Added missing SummarizationJob and SummarizationResult imports
from models import (User, Company, AnalysisJob, AnalysisResult, ActivityLog,
                   Session as UserSession, Case, CaseDocument, SubscriptionPlan,
                   Subscription, SummarizationJob, SummarizationResult, FailedLoginAttempt,
                   ProcessService, AssistantConversation, AssistantMessage)

# Import blueprints
from auth_routes import auth_bp, login_required, admin_required, company_admin_required, log_activity
from admin_routes import admin_bp
from company_admin_routes import company_admin_bp
from case_routes import case_bp

# 🆕 NEW: Import AI Legal Search functionality
from ai_legal_search_routes import ai_legal_search_bp, init_legal_search

# 🤖 NEW: Import AI Assistant functionality
from assistant_routes import assistant_bp

app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.secret_key = os.environ.get('FLASK_SECRET_KEY', 'dev-secret-key')
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024  # 50MB max upload
app.config['OPENAI_API_KEY'] = os.environ.get('OPENAI_API_KEY', '')
app.config['ALLOWED_EXTENSIONS'] = {'pdf', 'txt', 'doc', 'docx', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'bmp', 'gif', 'webp'}

# Database configuration
# Build URI from environment variables (loaded from config/.env)
DB_USER = os.environ.get('DB_USER', 'lawbot_user')
DB_PASSWORD = os.environ.get('DB_PASSWORD', '')
DB_HOST = os.environ.get('DB_HOST', 'localhost')
DB_PORT = os.environ.get('DB_PORT', '3306')
DB_NAME = os.environ.get('DB_NAME', 'lawbot')

if DB_PASSWORD:
    # Use environment variables if available
    app.config['SQLALCHEMY_DATABASE_URI'] = f'mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}'
else:
    # Fallback to DATABASE_URL if individual vars not set
    app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'mysql+pymysql://lawbot_user@localhost/lawbot')
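# Illustrative only: DATABASE_URL is expected as a SQLAlchemy URL, e.g.
#   mysql+pymysql://lawbot_user:secret@localhost:3306/lawbot
# (same shape as the URI built from the DB_* variables above; credentials here are placeholders)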

app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Force HTTPS URLs for external links (like invitations)
app.config['PREFERRED_URL_SCHEME'] = os.environ.get('PREFERRED_URL_SCHEME', 'https')

# ProxyFix for Apache reverse proxy - gets correct protocol/host from Apache headers
if os.environ.get('USE_PROXY_FIX', 'false').lower() == 'true':
    from werkzeug.middleware.proxy_fix import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
    print("✅ ProxyFix enabled for Apache reverse proxy")

# Load security configuration
from config.security_config import *
app.config['WTF_CSRF_ENABLED'] = WTF_CSRF_ENABLED
app.config['WTF_CSRF_TIME_LIMIT'] = WTF_CSRF_TIME_LIMIT
app.config['WTF_CSRF_SSL_STRICT'] = WTF_CSRF_SSL_STRICT
app.config['RECAPTCHA_SITE_KEY'] = RECAPTCHA_SITE_KEY
app.config['RECAPTCHA_SECRET_KEY'] = RECAPTCHA_SECRET_KEY

# Initialize security extensions
csrf = CSRFProtect()
csrf.init_app(app)

# Handle CSRF errors gracefully
@app.errorhandler(400)
def handle_csrf_error(e):
    if 'csrf' in str(e).lower():
        flash('Security token expired. Please try again.', 'warning')
        return redirect(request.referrer or url_for('index'))
    return str(e), 400

# Make csrf_token available in all templates
from flask_wtf.csrf import generate_csrf as wtf_generate_csrf

@app.context_processor
def inject_csrf_token():
    def generate_csrf():
        if app.config.get('WTF_CSRF_ENABLED', True):
            return wtf_generate_csrf()
        return ''
    return dict(csrf_token=generate_csrf)
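# Typical template usage of the injected csrf_token() callable (illustrative):
#   <form method="post">
#     <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
#     ...
#   </form>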

# Exempt certain routes from CSRF if needed for API endpoints
# csrf.exempt() can be used on specific routes

# Configure limiter with app (imported from extensions.py to avoid circular imports)
# Flask-Limiter reads its RATELIMIT_* config keys when init_app() runs, so they must be set first.
# The key function (get_remote_address) is assumed to be passed to the Limiter constructor in extensions.py.
app.config['RATELIMIT_DEFAULT'] = RATELIMIT_DEFAULT
app.config['RATELIMIT_STORAGE_URI'] = RATELIMIT_STORAGE_URI
app.config['RATELIMIT_ENABLED'] = RATELIMIT_ENABLED
limiter.init_app(app)

# Load additional security config
app.config['MAX_LOGIN_ATTEMPTS'] = int(os.environ.get('MAX_LOGIN_ATTEMPTS', 5))
app.config['LOCKOUT_DURATION_MINUTES'] = int(os.environ.get('LOCKOUT_DURATION_MINUTES', 30))

# 🔥 Initialize Stripe AFTER app config is set
init_stripe(app)

# Ensure upload directory exists
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# Initialize database
db.init_app(app)

# Initialize Flask-Mail
from email_config import init_mail
init_mail(app)

# ===== 🔧 TEMPLATE FILTERS FOR DATE/DATETIME HANDLING =====
# These must be defined AFTER app creation but BEFORE blueprint registration

@app.template_filter('safe_timestamp')
def safe_timestamp(date_obj):
    """Safely get timestamp from date or datetime object"""
    if not date_obj:
        return ''
    try:
        if hasattr(date_obj, 'timestamp'):
            # It's a datetime object
            return date_obj.timestamp()
        else:
            # It's a date object, convert to datetime first
            dt = datetime.combine(date_obj, dt_time.min)
            return dt.timestamp()
    except (AttributeError, TypeError):
        return ''

@app.template_filter('date_to_timestamp')
def date_to_timestamp(date_obj):
    """Convert date or datetime object to timestamp"""
    if not date_obj:
        return 0
    
    try:
        if hasattr(date_obj, 'timestamp'):
            return date_obj.timestamp()
        else:
            datetime_obj = datetime.combine(date_obj, dt_time.min)
            return datetime_obj.timestamp()
    except (AttributeError, TypeError):
        return 0

@app.template_filter('as_datetime')
def as_datetime(date_obj):
    """Convert date object to datetime object"""
    if not date_obj:
        return None
        
    try:
        if hasattr(date_obj, 'timestamp'):
            # Already a datetime
            return date_obj
        else:
            # Convert date to datetime
            return datetime.combine(date_obj, dt_time.min)
    except (AttributeError, TypeError):
        return None

@app.template_filter('format_date')
def format_date(date_obj, format_str='%Y-%m-%d'):
    """Safely format date/datetime objects"""
    if not date_obj:
        return ''
    try:
        return date_obj.strftime(format_str)
    except (AttributeError, TypeError):
        return ''
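# Example Jinja usage of the filters above (illustrative; variable names are placeholders):
#   {{ case.created_at|format_date('%b %d, %Y') }}
#   {{ some_date|as_datetime }}
#   {{ some_date|date_to_timestamp }}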

# ===== VERIFY FILTERS ARE REGISTERED =====
print("🔧 Template filters registered:")
for filter_name in ['safe_timestamp', 'date_to_timestamp', 'as_datetime', 'format_date']:
    if filter_name in app.jinja_env.filters:
        print(f"  ✅ {filter_name}")
    else:
        print(f"  ❌ {filter_name} - NOT FOUND")

# Register blueprints AFTER template filters
app.register_blueprint(auth_bp)
app.register_blueprint(admin_bp)
app.register_blueprint(company_admin_bp)
app.register_blueprint(case_bp)
app.register_blueprint(summarization_bp)
app.register_blueprint(subscription_bp)
# 🆕 NEW: Register AI Legal Search blueprint
app.register_blueprint(ai_legal_search_bp)
# Law Library blueprint
from law_library_routes import law_library_bp
app.register_blueprint(law_library_bp)
# 🤖 NEW: Register AI Assistant blueprint
app.register_blueprint(assistant_bp)

# Exempt specific blueprint routes from CSRF protection
csrf.exempt(summarization_bp)

# 🆕 NEW: Initialize AI Legal Search after app configuration
init_legal_search(app)

# ===== SECURITY HEADERS =====
@app.after_request
def set_security_headers(response):
    """Add security headers to all responses"""
    # HTTP Strict Transport Security (HSTS)
    if os.environ.get('ENABLE_HSTS', 'True').lower() == 'true':
        hsts_max_age = os.environ.get('HSTS_MAX_AGE', '31536000')  # 1 year default
        response.headers['Strict-Transport-Security'] = f'max-age={hsts_max_age}; includeSubDomains; preload'

    # Content Security Policy (CSP)
    if os.environ.get('ENABLE_CSP', 'True').lower() == 'true':
        csp = (
            "default-src 'self'; "
            "script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdn.jsdelivr.net https://js.stripe.com; "
            "style-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net https://fonts.googleapis.com; "
            "font-src 'self' https://fonts.gstatic.com; "
            "img-src 'self' data: https:; "
            "connect-src 'self' https://api.openai.com https://api.anthropic.com https://www.courtlistener.com; "
            "frame-src 'self' https://js.stripe.com; "
            "object-src 'none'; "
            "base-uri 'self'; "
            "form-action 'self';"
        )
        response.headers['Content-Security-Policy'] = csp

    # X-Frame-Options - Prevent clickjacking
    if os.environ.get('ENABLE_X_FRAME_OPTIONS', 'True').lower() == 'true':
        x_frame_option = os.environ.get('X_FRAME_OPTIONS', 'DENY')
        response.headers['X-Frame-Options'] = x_frame_option

    # X-Content-Type-Options - Prevent MIME type sniffing
    response.headers['X-Content-Type-Options'] = 'nosniff'

    # X-XSS-Protection - Enable browser XSS protection
    response.headers['X-XSS-Protection'] = '1; mode=block'

    # Referrer-Policy - Control referrer information
    response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'

    # Permissions-Policy - Control browser features
    response.headers['Permissions-Policy'] = (
        'geolocation=(), '
        'microphone=(), '
        'camera=(), '
        'payment=(self), '
        'usb=(), '
        'magnetometer=(), '
        'gyroscope=(), '
        'accelerometer=()'
    )

    return response
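# Quick sanity check (illustrative sketch, assuming the app is running locally on port 5000):
#   import requests
#   resp = requests.get('http://localhost:5000/')
#   print(resp.headers.get('Content-Security-Policy'))
#   print(resp.headers.get('Strict-Transport-Security'))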

# ===== ERROR HANDLERS =====
@app.errorhandler(404)
def not_found_error(error):
    """Handle 404 Not Found errors"""
    return render_template('error.html',
                         error="Page Not Found",
                         message="The page you're looking for doesn't exist or has been moved."), 404

@app.errorhandler(500)
def internal_error(error):
    """Handle 500 Internal Server errors"""
    db.session.rollback()  # Rollback any database transactions
    app.logger.error(f'Server Error: {error}')
    return render_template('error.html',
                         error="Internal Server Error",
                         message="We experienced an unexpected error. Our team has been notified and will investigate the issue."), 500

@app.errorhandler(Exception)
def handle_exception(error):
    """Handle all uncaught exceptions"""
    # Log the error
    app.logger.error(f'Unhandled Exception: {error}', exc_info=True)

    # Rollback any database transactions
    db.session.rollback()

    # Return error page
    return render_template('error.html',
                         error="An Error Occurred",
                         message="We encountered an unexpected error. Please try again or report this issue if it persists."), 500

# Close database session when application shuts down
@app.teardown_appcontext
def shutdown_db_session(exception=None):
    shutdown_session()

@app.context_processor
def inject_subscription_data():
    """Make subscription data available in all templates"""
    if 'user_id' in session:
        user_id = session['user_id']
        user = db.session.get(User, user_id)
        subscription = user.get_subscription() if user else None

        # Calculate case count based on user role and permissions
        case_count = 0
        recent_cases = []
        
        if user:
            try:
                if user.is_admin():
                    # Admin sees all cases
                    case_count = Case.query.count()
                    recent_cases = Case.query.order_by(Case.created_at.desc()).limit(3).all()
                elif user.is_company_admin():
                    # Company admin sees all company cases
                    case_count = Case.query.filter_by(company_id=user.company_id).count()
                    recent_cases = Case.query.filter_by(company_id=user.company_id).order_by(Case.created_at.desc()).limit(3).all()
                else:
                    # Regular users see cases they created, lead, or are a team member of, plus non-confidential company cases
                    case_query = Case.query.filter(
                        db.or_(
                            Case.created_by_id == user.id,
                            Case.lead_attorney_id == user.id,
                            db.and_(Case.company_id == user.company_id, Case.is_confidential == False),
                            Case.team_members.any(User.id == user.id)  # Include team member cases
                        )
                    )
                    case_count = case_query.count()
                    recent_cases = case_query.order_by(Case.created_at.desc()).limit(3).all()
            except Exception as e:
                print(f"Error calculating case count: {e}")
                case_count = 0
                recent_cases = []

        return {
            'current_subscription': subscription,
            'user_plan': subscription.plan.name if subscription else 'free',
            'can_analyze': subscription.can_analyze_document() if subscription else False,
            'analyses_remaining': user.get_monthly_analyses_remaining() if user else 0,
            'user_case_count': case_count,
            'recent_cases': recent_cases,
            'current_user': user  # Add current user to context
        }
    return {
        'user_case_count': 0,
        'recent_cases': [],
        'current_user': None
    }

def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
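# Illustrative behaviour: allowed_file('contract.PDF') -> True (extension check is case-insensitive),
# allowed_file('notes') -> False (no extension), allowed_file('script.exe') -> False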

# Helper functions for file type detection
def is_image_file(filename):
    """Check if the uploaded file is an image"""
    image_extensions = {'jpg', 'jpeg', 'png', 'tif', 'tiff', 'bmp', 'gif', 'webp'}
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in image_extensions

def is_document_file(filename):
    """Check if the uploaded file is a document"""
    document_extensions = {'pdf', 'txt', 'docx', 'doc'}
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in document_extensions

def ensure_user_subscription(user):
    """Ensure user has a subscription record (inherit from company or create free plan)"""
    subscription = user.get_subscription()
    if not subscription:
        # First, check if user's company has an active subscription
        company_subscription = None
        if user.company_id:
            # Get the company's active subscription
            company_subscription = Subscription.query.filter_by(
                company_id=user.company_id,
                status='active'
            ).first()
        
        if company_subscription and company_subscription.plan:
            # Company has an active subscription - inherit the plan
            plan_to_assign = company_subscription.plan
            print(f"🏢 Assigning user {user.username} to company plan: {plan_to_assign.name}")
        else:
            # No company subscription or company has free plan - assign free plan
            plan_to_assign = SubscriptionPlan.query.filter_by(name='free').first()
            print(f"👤 Assigning user {user.username} to free plan (no company subscription)")
        
        if plan_to_assign:
            new_subscription = Subscription(
                user_id=user.id,
                company_id=user.company_id,
                plan_id=plan_to_assign.id,
                status='active',
                monthly_analyses_used=0,
                monthly_reset_date=datetime.utcnow() + timedelta(days=30)
            )
            db.session.add(new_subscription)
            db.session.commit()
            print(f"✅ Created {plan_to_assign.name} subscription for user {user.username}")
            return new_subscription
        else:
            print(f"❌ Error: Could not find plan to assign to user {user.username}")
    
    return subscription

def ensure_company_users_have_correct_subscription(company_id):
    """Ensure all users in a company have the correct subscription plan"""
    from models import User, Subscription, SubscriptionPlan
    
    # Get company's active subscription
    company_subscription = Subscription.query.filter_by(
        company_id=company_id,
        status='active'
    ).first()
    
    if not company_subscription:
        print(f"⚠️ Company {company_id} has no active subscription")
        return
    
    # Get all active users in the company
    company_users = User.query.filter_by(
        company_id=company_id,
        active=True
    ).all()
    
    users_updated = 0
    for user in company_users:
        user_subscription = user.get_subscription()
        
        # If user has no subscription or wrong plan, update it
        if (not user_subscription or 
            user_subscription.plan_id != company_subscription.plan_id):
            
            if user_subscription:
                # Update existing subscription
                user_subscription.plan_id = company_subscription.plan_id
                print(f"🔄 Updated user {user.username} to {company_subscription.plan.name} plan")
            else:
                # Create new subscription
                new_subscription = Subscription(
                    user_id=user.id,
                    company_id=company_id,
                    plan_id=company_subscription.plan_id,
                    status='active',
                    monthly_analyses_used=0,
                    monthly_reset_date=datetime.utcnow() + timedelta(days=30)
                )
                db.session.add(new_subscription)
                print(f"➕ Created {company_subscription.plan.name} subscription for user {user.username}")
            
            users_updated += 1
    
    if users_updated > 0:
        db.session.commit()
        print(f"✅ Updated {users_updated} users to {company_subscription.plan.name} plan")
    else:
        print(f"✅ All users already have correct subscription plan")

# NEW: Image analysis function
def analyze_image_with_openai(image_path, perspective, additional_instructions=None):
    """Analyze an image using OpenAI's Vision API with legal context"""
    try:
        # Read and encode the image, detecting its MIME type so the data URL matches the actual
        # format (PNG/TIFF/etc. uploads are allowed); falls back to image/jpeg if unknown
        import mimetypes
        mime_type = mimetypes.guess_type(image_path)[0] or 'image/jpeg'
        with open(image_path, "rb") as image_file:
            image_data = image_file.read()
            base64_image = base64.b64encode(image_data).decode('utf-8')

        # Create perspective-specific prompts for legal analysis
        perspective_prompts = {
            'prosecutor': """Analyze this image from a prosecutor's perspective for legal proceedings. Focus on:
- Evidence of criminal activity, violations, or negligence
- Elements that could support criminal charges or civil liability
- Safety violations, regulatory non-compliance, or dangerous conditions
- Items, people, or conditions that strengthen the prosecution's case
- Potential witnesses or evidence sources visible
- Documentation quality and evidentiary value""",

            'defense': """Analyze this image from a defense attorney's perspective. Look for:
- Evidence that could support the defendant's case or create reasonable doubt
- Mitigating circumstances or alternative explanations
- Procedural issues with evidence collection or documentation
- Environmental factors that could explain the defendant's actions
- Elements that challenge the prosecution's narrative
- Potential defenses or justifications visible in the scene""",

            'neutral': """Provide a neutral legal analysis of this image for legal proceedings. Examine:
- All visible elements and their potential legal significance
- Factual observations relevant to legal proceedings
- Evidentiary value and admissibility considerations
- Safety, compliance, and regulatory aspects
- Documentation quality and chain of custody considerations
- Relevant details for both prosecution and defense perspectives"""
        }

        base_prompt = perspective_prompts.get(perspective, perspective_prompts['neutral'])

        if additional_instructions:
            base_prompt += f"\n\nSpecific focus areas requested: {additional_instructions}"

        base_prompt += """

Provide a comprehensive legal image analysis including:

**1. Overall Assessment**
- Summary of what the image shows in legal context
- Key elements relevant to legal proceedings

**2. Evidence Analysis**
- Physical evidence visible in the image
- Potential relevance to the case
- Quality and admissibility considerations

**3. Scene Analysis**
- Environmental factors and context
- Visible conditions that may be legally significant
- Any safety or regulatory compliance issues

**4. Legal Implications**
- How this evidence might impact the case
- Potential legal theories or claims supported or challenged
- Relevance to burden of proof

**5. Strategic Considerations**
- How to use this evidence effectively
- Potential challenges or weaknesses
- Questions this evidence raises

Provide specific, detailed observations grounded in what's actually visible in the image."""

        client = openai.OpenAI(api_key=app.config['OPENAI_API_KEY'])

        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": base_prompt},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}",
                                "detail": "high"
                            }
                        }
                    ]
                }
            ],
            max_tokens=4000,
            temperature=0.3
        )

        return response.choices[0].message.content

    except Exception as e:
        print(f"Error in image analysis: {e}")
        return f"Error analyzing image: {str(e)}"

def background_analysis(job_uuid, file_path, perspective, analysis_type, additional_instructions=None):
    """Perform document or image analysis in background thread"""
    try:
        with app.app_context():
            # Get the job
            job = AnalysisJob.query.filter_by(job_uuid=job_uuid).first()
            if not job:
                print(f"Job {job_uuid} not found")
                return

            # Get user for usage tracking
            user = db.session.get(User, job.user_id)
            if not user:
                print(f"User {job.user_id} not found")
                return

            print(f"Starting {analysis_type} analysis for job {job_uuid}")

            if analysis_type == "image":
                # Analyze image using OpenAI Vision
                analysis_text = analyze_image_with_openai(file_path, perspective, additional_instructions)

                # Create result object with image-specific structure
                result_data = {
                    "final_analysis": analysis_text,
                    "section_analyses": [],  # Images don't have sections
                    "citations": []  # Images don't have citations
                }

            else:  # Document analysis
                # Process document
                api_key = os.getenv('OPENAI_API_KEY')
                if not api_key:
                    raise ValueError("OPENAI_API_KEY not found in environment variables")
                processor = ImprovedDocumentProcessor(api_key)
                result = processor.direct_analyze_document(file_path, perspective)
                if not result:
                    raise Exception("Failed to process document")

                # Process citations - FIXED: Pass the text string and unpack the tuple
                processed_text, citations = process_analysis_with_citations(
                    result['final_analysis'],  # Pass the actual text, not the whole result dict
                    perspective,
                    api_key
                )

                # Create result_data for consistency with image analysis
                result_data = {
                    "final_analysis": processed_text,
                    "section_analyses": result.get("section_analyses", []),
                    "citations": citations
                }

            # Save results to database (this applies to both image and document analysis)
            analysis_result = AnalysisResult(
                job_id=job.id,
                final_analysis=result_data["final_analysis"],
                section_analyses=result_data["section_analyses"],
                citations=result_data["citations"]
            )
            db.session.add(analysis_result)

            # Update job status
            job.status = "completed"
            job.completed_at = datetime.utcnow()

            # Update user's usage count
            subscription = ensure_user_subscription(user)
            if subscription:
                # Check if this is using a single analysis purchase
                if subscription.plan.name == 'free' and subscription.monthly_analyses_used >= subscription.plan.get_monthly_analysis_limit():
                    # User is over the limit, so they must be using a single-analysis purchase
                    # Import here to avoid a circular import at module level
                    from models import SingleAnalysisPurchase
                    unused_purchase = SingleAnalysisPurchase.query.filter_by(
                        user_id=user.id,
                        status='completed'
                    ).first()
                    if unused_purchase:
                        unused_purchase.mark_as_used(job_uuid)
                        print(f"Used single analysis purchase for job {job_uuid}")
                else:
                    # Normal monthly usage
                    subscription.monthly_analyses_used += 1

            db.session.commit()

            print(f"Analysis completed successfully for job {job_uuid}")

            # Log the activity
            user_id = job.user_id
            log_activity(
                user_id,
                'analysis_completed',
                f'Completed {perspective} analysis of {job.original_filename}',
                'analysis_job',
                job.id
            )

            # Log case activity if applicable
            if job.case_id:
                from case_routes import log_case_activity
                log_case_activity(
                    job.case_id,
                    user_id,
                    'analysis_completed',
                    f'Completed {perspective} analysis of {job.original_filename}',
                    'analysis',
                    job.id
                )

    except Exception as e:
        # Update job with error
        with app.app_context():
            job = AnalysisJob.query.filter_by(job_uuid=job_uuid).first()
            if job:
                job.status = "error"
                job.error_message = str(e)
                db.session.commit()

        print(f"Error in background analysis for job {job_uuid}: {e}")
        # Add more detailed error logging for debugging
        import traceback
        print(f"Full traceback: {traceback.format_exc()}")

# ===== COURTLISTENER SEARCH HELPER FUNCTIONS =====

def extract_case_name(case):
    """Extract case name from CourtListener API response - FIXED VERSION"""
    try:
        # Debug: Print available fields
        print(f"Available fields in case: {list(case.keys())}")

        # Try the exact field names from CourtListener API
        case_name_fields = ['caseName', 'caseNameFull', 'case_name', 'title']

        for field in case_name_fields:
            if field in case and case[field]:
                case_name = str(case[field]).strip()
                if case_name and case_name.lower() != 'none':
                    print(f"Found case name in field '{field}': {case_name}")
                    return case_name

        # Try nested court data for case name
        if 'court' in case and isinstance(case['court'], dict):
            court_data = case['court']
            for field in ['name', 'short_name', 'full_name']:
                if field in court_data and court_data[field]:
                    case_name = str(court_data[field]).strip()
                    if case_name:
                        print(f"Found case name in court.{field}: {case_name}")
                        return f"Case from {case_name}"

        # Try to build case name from other available fields
        if 'id' in case:
            return f"Case ID: {case['id']}"

        # Last resort
        return 'Untitled Case'

    except Exception as e:
        print(f"Error extracting case name: {e}")
        return 'Untitled Case'

def extract_citation(case):
    """Extract citation from CourtListener API response - FIXED VERSION"""
    try:
        print(f"Extracting citation from case with fields: {list(case.keys())}")

        # Try neutralCite first (most reliable)
        if 'neutralCite' in case and case['neutralCite']:
            print(f"Found neutralCite: {case['neutralCite']}")
            return case['neutralCite']

        # Try citation field
        if 'citation' in case and case['citation']:
            citation_data = case['citation']
            print(f"Found citation field: {citation_data}")

            if isinstance(citation_data, str):
                return citation_data
            elif isinstance(citation_data, dict):
                # Build citation from parts
                volume = citation_data.get('volume', '')
                reporter = citation_data.get('reporter', '')
                page = citation_data.get('page', '')

                if volume and reporter and page:
                    return f"{volume} {reporter} {page}"
                elif volume and reporter:
                    return f"{volume} {reporter}"
            elif isinstance(citation_data, list) and citation_data:
                return str(citation_data[0])

        # Try to build citation from other fields
        court_name = "Court"
        if 'court' in case and case['court']:
            if isinstance(case['court'], dict):
                court_name = case['court'].get('short_name', case['court'].get('name', 'Court'))
            else:
                court_name = str(case['court'])

        date_filed = case.get('dateFiled', case.get('date_filed', ''))
        if date_filed:
            return f"{court_name} ({date_filed})"

        # Last resort: use any identifier
        if 'id' in case:
            return f"CourtListener ID: {case['id']}"

        return 'Citation not available'

    except Exception as e:
        print(f"Error extracting citation: {e}")
        return 'Citation not available'

def extract_case_synopsis(case):
    """Extract case synopsis/summary from available fields"""
    try:
        synopsis_parts = []

        # 1. Try syllabus first (most likely to contain case summary)
        if 'syllabus' in case and case['syllabus']:
            syllabus = str(case['syllabus']).strip()
            if syllabus and len(syllabus) > 10:
                synopsis_parts.append(syllabus)
                print(f"Found syllabus: {syllabus[:100]}...")

        # 2. Try procedural history
        if 'procedural_history' in case and case['procedural_history']:
            proc_history = str(case['procedural_history']).strip()
            if proc_history and len(proc_history) > 10:
                synopsis_parts.append(proc_history)
                print(f"Found procedural history: {proc_history[:100]}...")

        # 3. Try posture (case status/posture)
        if 'posture' in case and case['posture']:
            posture = str(case['posture']).strip()
            if posture and len(posture) > 10:
                synopsis_parts.append(posture)
                print(f"Found posture: {posture[:100]}...")

        # 4. Try suitNature (nature of the suit)
        if 'suitNature' in case and case['suitNature']:
            suit_nature = str(case['suitNature']).strip()
            if suit_nature and len(suit_nature) > 10:
                synopsis_parts.append(f"Nature of suit: {suit_nature}")
                print(f"Found suit nature: {suit_nature}")

        # 5. Try to extract from opinions if available
        if 'opinions' in case and case['opinions'] and isinstance(case['opinions'], list):
            for opinion in case['opinions'][:1]:  # Just check first opinion
                if isinstance(opinion, dict) and 'snippet' in opinion and opinion['snippet']:
                    snippet = str(opinion['snippet']).strip()
                    if snippet and len(snippet) > 20:
                        synopsis_parts.append(snippet)
                        print(f"Found opinion snippet: {snippet[:100]}...")
                        break

        # 6. Try snippet field directly
        if 'snippet' in case and case['snippet']:
            snippet = str(case['snippet']).strip()
            if snippet and len(snippet) > 20:
                synopsis_parts.append(snippet)
                print(f"Found direct snippet: {snippet[:100]}...")

        # Combine all found synopsis parts
        if synopsis_parts:
            # Join all parts and limit length
            full_synopsis = ' | '.join(synopsis_parts)

            # Clean up the text
            full_synopsis = full_synopsis.replace('\n', ' ').replace('\r', ' ')
            full_synopsis = ' '.join(full_synopsis.split())  # Remove extra whitespace

            # Limit to reasonable length for display
            if len(full_synopsis) > 400:
                full_synopsis = full_synopsis[:400] + '...'

            return full_synopsis

        # Fallback: Generic message
        return 'Case synopsis not available'

    except Exception as e:
        print(f"Error extracting synopsis: {e}")
        return 'Case synopsis not available'

# ===== ENHANCED LEGAL RESEARCH FUNCTIONS =====

def search_courtlistener_api(query):
    """Extract CourtListener search logic for enhanced research"""
    api_token = os.environ.get('COURTLISTENER_API_TOKEN')
    if not api_token:
        print("❌ CourtListener API token not found")
        return []

    headers = {
        'Authorization': f'Token {api_token}',
        'User-Agent': 'LawBot Legal Research Platform',
        'Accept': 'application/json',
        'Accept-Language': 'en-US,en;q=0.9',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive'
    }

    search_params = {
        'q': query,
        'type': 'o',  # Opinions
        'format': 'json'
    }

    try:
        print(f"🔍 Searching CourtListener for: {query}")
        response = requests.get(
            'https://www.courtlistener.com/api/rest/v4/search/',
            params=search_params,
            headers=headers,
            timeout=15,
            verify=True
        )

        if response.status_code == 200:
            api_data = response.json()
            results = []

            for i, item in enumerate(api_data.get('results', [])[:10]):
                try:
                    case_name = extract_case_name(item)
                    citation = extract_citation(item)
                    
                    court = 'Unknown Court'
                    if 'court' in item and item['court']:
                        if isinstance(item['court'], dict):
                            court = item['court'].get('name', item['court'].get('short_name', 'Unknown Court'))
                        else:
                            court = str(item['court'])

                    date_filed = item.get('dateFiled', item.get('date_filed', 'Date not available'))
                    synopsis = extract_case_synopsis(item)

                    absolute_url = ''
                    if 'absolute_url' in item and item['absolute_url']:
                        absolute_url = item['absolute_url']
                        if not absolute_url.startswith('http'):
                            absolute_url = f"https://www.courtlistener.com{absolute_url}"

                    result_item = {
                        'case_name': case_name,
                        'citation': citation,
                        'court': court,
                        'date': date_filed,
                        'snippet': synopsis,
                        'url': absolute_url,
                        'relevance_score': item.get('score', 0)
                    }

                    results.append(result_item)
                    
                except Exception as item_error:
                    print(f"Error processing CourtListener result {i+1}: {str(item_error)}")
                    continue

            print(f"✅ CourtListener returned {len(results)} results")
            return results

        # Non-200 responses fall through to here; log the status so failures aren't silent
        print(f"❌ CourtListener API returned status {response.status_code}")
        return []

    except Exception as e:
        print(f"❌ CourtListener API error: {e}")
        return []
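# Shape of each item returned by search_courtlistener_api() above (values are illustrative placeholders):
#   {
#       'case_name': 'Smith v. Jones',
#       'citation': '123 F.3d 456',
#       'court': 'United States Court of Appeals',
#       'date': '2020-01-15',
#       'snippet': 'Case synopsis ...',
#       'url': 'https://www.courtlistener.com/opinion/...',
#       'relevance_score': 0
#   }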

def search_with_claude_ai(query):
    """Use Claude API for comprehensive legal analysis"""
    try:
        print(f"🤖 Starting Claude AI analysis for: {query}")
        
        # Get API key from environment
        api_key = os.environ.get('ANTHROPIC_API_KEY')
        if not api_key:
            error_msg = "Claude API key not found in environment variables"
            print(f"❌ {error_msg}")
            return error_msg
        
        print(f"✅ API key found: {api_key[:15]}...")
        
        # FIXED: Include API key in headers
        response = requests.post("https://api.anthropic.com/v1/messages", 
            headers={
                "Content-Type": "application/json",
                "x-api-key": api_key,  # 🔥 THIS WAS MISSING!
                "anthropic-version": "2023-06-01"
            },
            json={
                "model": "claude-sonnet-4-20250514",  # Updated to working model
                "max_tokens": 2000,
                "messages": [
                    {
                        "role": "user", 
                        "content": f"""You are a legal research assistant with access to current information through web search. 

Research and provide comprehensive information about: {query}

Please include:

**1. Case Background & Facts**
- What happened and key details
- Parties involved
- Timeline of events

**2. Legal Issues & Current Status**
- Criminal charges or civil claims
- Court proceedings and current status
- Any convictions, sentences, or judgments

**3. Recent Developments**
- Latest court decisions or appeals
- Recent news or legal developments
- Current procedural status

**4. Legal Context**
- Relevant legal precedents or similar cases
- Applicable laws and legal principles
- Jurisdictional considerations

**5. Official Sources**
- Where to find court documents and records
- Relevant court websites or databases
- Public records availability

Use web search to find current information. Focus on accuracy and clearly cite your sources when possible. If information is uncertain, state that clearly and suggest verification methods.

Format your response with clear headings using **bold text** and bullet points for easy reading."""
                    }
                ]
            },
            timeout=30
        )
        
        print(f"📡 API Response Status: {response.status_code}")
        
        if response.status_code == 200:
            data = response.json()
            analysis_text = data['content'][0]['text']
            print(f"✅ Claude AI analysis completed ({len(analysis_text)} chars)")
            return analysis_text
        else:
            error_msg = f"Claude API returned status {response.status_code}: {response.text}"
            print(f"❌ {error_msg}")
            return error_msg
            
    except Exception as e:
        error_msg = f"AI analysis error: {str(e)}"
        print(f"❌ Claude AI error: {e}")
        return error_msg

# ===== MAIN ROUTES =====

@app.route('/')
def index():
    if 'user_id' in session:
        return redirect(url_for('dashboard'))
    return render_template('landing.html')  # Show landing page for non-logged-in users

@app.route('/signup', methods=['GET', 'POST'])
@limiter.limit(SIGNUP_RATE_LIMIT)  # Rate limiting
def signup():
    """Handle free account signup with security measures"""
    from email_config import mail
    from flask_mail import Message
    from security_utils import (verify_recaptcha, check_honeypot, is_ip_blocked, 
                                record_failed_attempt, validate_password_strength,
                                generate_verification_token, send_verification_email)
    
    if request.method == 'POST':
        # Check if IP is blocked
        client_ip = request.remote_addr
        if is_ip_blocked(client_ip):
            flash('Too many attempts. Please try again later.', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Verify reCAPTCHA
        recaptcha_response = request.form.get('g-recaptcha-response')
        if RECAPTCHA_ENABLED and not verify_recaptcha(recaptcha_response):
            record_failed_attempt(client_ip)
            flash('Please complete the reCAPTCHA verification', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Check honeypot field (anti-bot)
        if not check_honeypot(request.form):
            record_failed_attempt(client_ip)
            # Don't reveal it's a honeypot; act like a normal failure
            flash('Invalid submission. Please try again.', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Get form data
        first_name = request.form.get('first_name', '').strip()
        last_name = request.form.get('last_name', '').strip()
        email = request.form.get('email', '').strip().lower()
        phone_number = request.form.get('phone_number', '').strip()
        password = request.form.get('password')
        confirm_password = request.form.get('confirm_password')
        organization = request.form.get('organization', '').strip()

        # Validate required fields
        if not all([first_name, last_name, email, phone_number, password, confirm_password]):
            flash('All required fields must be filled', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Validate email format
        email_pattern = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
        if not re.match(email_pattern, email):
            flash('Please enter a valid email address', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Validate passwords match
        if password != confirm_password:
            flash('Passwords do not match', 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Validate password strength
        password_errors = validate_password_strength(password)
        if password_errors:
            for error in password_errors:
                flash(error, 'error')
            return redirect(url_for('signup') + '#signup')
        
        # Check if email already exists
        existing_user = User.query.filter_by(email=email).first()
        if existing_user:
            flash('Email already registered. Please login.', 'error')
            return redirect(url_for('signup') + '#signup')
        
        try:
            # Create a new company for the individual user (or use their organization name)
            company_name = organization if organization else f"{first_name} {last_name}'s Practice"
            company = Company(
                name=company_name,
                description=f"Individual account for {first_name} {last_name}",
                subscription_tier='free',
                active=True
            )
            db.session.add(company)
            db.session.flush()  # Get company ID
            
            # Generate email verification token
            verification_token = generate_verification_token()
            
            # Create the user account (inactive until email verified)
            user = User(
                username=email,  # Use email as username
                email=email,
                first_name=first_name,
                last_name=last_name,
                phone_number=phone_number,
                role='user',  # Regular user role
                company_id=company.id,
                active=False if EMAIL_VERIFICATION_REQUIRED else True,
                email_verified=False,
                email_verification_token=verification_token,
                email_verification_sent_at=datetime.utcnow()
            )
            user.set_password(password)
            db.session.add(user)
            db.session.flush()  # Get user ID
            
            # Create free subscription
            free_plan = SubscriptionPlan.query.filter_by(stripe_price_id='free').first()
            if not free_plan:
                # Create free plan if it doesn't exist
                free_plan = SubscriptionPlan(
                    name='free',  # lowercase to match name-based lookups elsewhere (e.g. ensure_user_subscription)
                    display_name='Free Plan',
                    stripe_price_id='free',
                    price=0,
                    features={'basic_analysis': True, 'case_limit': 1, 'monthly_analyses': 5}
                )
                db.session.add(free_plan)
                db.session.flush()
            
            subscription = Subscription(
                user_id=user.id,
                plan_id=free_plan.id,
                status='active',
                current_period_start=datetime.utcnow(),
                current_period_end=datetime.utcnow() + timedelta(days=30),
                monthly_analyses_used=0
            )
            db.session.add(subscription)
            
            # Log activity
            activity = ActivityLog(
                user_id=user.id,
                company_id=company.id,
                activity_type='signup',
                description=f"New user signup: {email}"
            )
            db.session.add(activity)
            
            db.session.commit()
            
            # Send verification email
            if EMAIL_VERIFICATION_REQUIRED:
                if send_verification_email(user, verification_token):
                    flash('Account created! Please check your email to verify your account.', 'success')
                else:
                    flash('Account created but email could not be sent. Please contact support.', 'warning')
            else:
                # If verification not required, send welcome email
                try:
                    msg = Message(
                        'Welcome to EPOLaw - Your Account is Ready',
                        sender=app.config.get('MAIL_USERNAME', 'noreply@epolaw.com'),
                        recipients=[email]
                    )
                    msg.html = f'''
                    <h2>Welcome to EPOLaw, {first_name}!</h2>
                    <p>Your free account has been created successfully.</p>
                    <h3>Your Login Credentials:</h3>
                    <ul>
                        <li><strong>Email:</strong> {email}</li>
                        <li><strong>Password:</strong> [The password you created]</li>
                    </ul>
                    <h3>Your Free Plan Includes:</h3>
                    <ul>
                        <li>5 document analyses per month</li>
                        <li>Basic AI legal research</li>
                        <li>1 active case</li>
                        <li>Email support</li>
                    </ul>
                    <p><a href="{url_for('auth.login', _external=True)}">Click here to login</a></p>
                    <h3>Ready to Upgrade?</h3>
                    <p>If you'd like to upgrade to our Professional or Enterprise plan for unlimited analyses and team features, 
                    simply reply to this email and our team will assist you.</p>
                    <p>Best regards,<br>The EPOLaw Team</p>
                    '''
                    mail.send(msg)
                    flash('Account created successfully! Check your email for login details.', 'success')
                except Exception as e:
                    print(f"Failed to send welcome email: {e}")
                    flash('Account created successfully! You can now login.', 'success')
            
            # Send notification to admin about new signup
            try:
                admin_users = User.query.filter_by(role='admin').all()
                for admin in admin_users:
                    msg = Message(
                        'New EPOLaw Signup - Potential Upsell Opportunity',
                        sender=app.config.get('MAIL_USERNAME', 'noreply@epolaw.com'),
                        recipients=[admin.email]
                    )
                    msg.html = f'''
                    <h2>New User Signup</h2>
                    <p>A new user has signed up for a free account:</p>
                    <ul>
                        <li><strong>Name:</strong> {first_name} {last_name}</li>
                        <li><strong>Email:</strong> <a href="mailto:{email}">{email}</a></li>
                        <li><strong>Phone Number:</strong> <a href="tel:{phone_number}">{phone_number}</a></li>
                        <li><strong>Organization:</strong> {organization if organization else 'Not specified'}</li>
                        <li><strong>Company:</strong> {company_name}</li>
                        <li><strong>Plan:</strong> Free</li>
                    </ul>
                    <p>This is a potential upsell opportunity to Professional or Enterprise plan.</p>
                    <p><a href="{url_for('admin.users', _external=True)}">View in Admin Panel</a></p>
                    '''
                    mail.send(msg)
            except Exception as e:
                print(f"Failed to send admin notification: {e}")
            
            return redirect(url_for('auth.login'))
            
        except Exception as e:
            db.session.rollback()
            import traceback
            error_details = traceback.format_exc()
            print(f"Signup error: {e}")
            print(f"Traceback: {error_details}")
            # Log to file for debugging (best effort: a logging failure must not mask the original error)
            try:
                with open('/var/log/epolaw/signup_errors.log', 'a') as f:
                    f.write(f"\n\n[{datetime.utcnow()}] Signup error:\n")
                    f.write(f"Error: {str(e)}\n")
                    f.write(f"Traceback:\n{error_details}\n")
            except OSError as log_error:
                print(f"Could not write signup error log: {log_error}")
            flash('An error occurred during signup. Please try again.', 'error')
            return redirect(url_for('signup') + '#signup')
    
    # GET request - show landing page with signup form
    return render_template('landing.html')

@app.route('/verify-email/<token>')
def verify_email(token):
    """Verify email address using token"""
    from security_utils import EMAIL_VERIFICATION_EXPIRY
    
    # Find user with this token
    user = User.query.filter_by(email_verification_token=token).first()
    
    if not user:
        flash('Invalid verification link', 'error')
        return redirect(url_for('auth.login'))
    
    # Check if token has expired
    if user.email_verification_sent_at:
        time_elapsed = datetime.utcnow() - user.email_verification_sent_at
        if time_elapsed > EMAIL_VERIFICATION_EXPIRY:
            flash('Verification link has expired. Please request a new one.', 'error')
            return redirect(url_for('auth.login'))
    
    # Verify the email
    user.email_verified = True
    user.active = True
    user.email_verification_token = None  # Clear the token
    db.session.commit()
    
    # Log the activity
    log_activity(user.id, 'email_verified', f"Email verified: {user.email}")
    
    flash('Email verified successfully! You can now login.', 'success')
    return redirect(url_for('auth.login'))

@app.route('/upload_form')
@login_required
def upload_form():
    """Display the file upload form"""
    # Get case information if provided
    case_id = request.args.get('case_id')
    document_id = request.args.get('document_id')

    case = None
    document = None

    if case_id:
        case = db.session.get(Case, case_id)
        user_id = session.get('user_id')
        user = db.session.get(User, user_id)

        # Check if user can access this case
        if case and not user.can_view_case(case):
            flash('You do not have permission to access this case', 'danger')
            return redirect(url_for('dashboard'))

    if document_id:
        document = db.session.get(CaseDocument, document_id)

    return render_template('index.html', case=case, document=document)

@app.route('/legal_research')
@login_required
def legal_research():
    """Legal research page with CourtListener integration"""
    return render_template('legal_research.html')

@app.route('/enhanced-legal-research')
@login_required  
def enhanced_legal_research_page():
    """Enhanced legal research page with CourtListener + Claude AI"""
    return render_template('enhanced_legal_research.html')

@app.route('/dashboard')
@login_required
def dashboard():
    """
    Display the dashboard with analysis statistics and recent activities
    """
    try:
        # Get current user
        user_id = session.get('user_id')
        current_user = db.session.get(User, user_id)

        # Verify user exists
        if not current_user:
            flash('User session expired. Please log in again.', 'danger')
            return redirect(url_for('auth.login'))

        # 🔥 ENSURE USER HAS SUBSCRIPTION RECORD
        ensure_user_subscription(current_user)

        # Calculate statistics based on user role
        today = datetime.utcnow().date()
        yesterday = today - timedelta(days=1)

        # Base query depending on user role
        if current_user.is_admin():
            base_query = AnalysisJob.query
            summaries_query = SummarizationJob.query
        elif current_user.is_company_admin():
            base_query = AnalysisJob.query.filter_by(company_id=current_user.company_id)
            summaries_query = SummarizationJob.query.filter_by(company_id=current_user.company_id)
        else:
            base_query = AnalysisJob.query.filter_by(user_id=current_user.id)
            summaries_query = SummarizationJob.query.filter_by(user_id=current_user.id)

        # Calculate statistics
        stats = {
            "total_analyses": base_query.count(),
            "completed": base_query.filter_by(status='completed').count(),
            "in_progress": base_query.filter_by(status='processing').count(),
            "failed": base_query.filter_by(status='error').count(),
            "today": base_query.filter(db.func.date(AnalysisJob.created_at) == today).count(),
        }

        # Add image vs document analysis counts
        all_analyses = base_query.all()
        stats["image_analyses"] = sum(1 for job in all_analyses if is_image_file(job.original_filename))
        stats["document_analyses"] = stats["total_analyses"] - stats["image_analyses"]

        # Get recent analyses
        recent_jobs = base_query.order_by(AnalysisJob.created_at.desc()).limit(10).all()

        # 🔥 FIXED: Get recent summaries for the dashboard (key addition from the summarization-enabled version)
        recent_summaries = summaries_query.order_by(SummarizationJob.created_at.desc()).limit(5).all()

        # Format analyses for display
        formatted_analyses = []
        for job in recent_jobs:
            job_date = job.created_at

            if job_date.date() == today:
                date_str = f"Today, {job_date.strftime('%I:%M %p')}"
            elif job_date.date() == yesterday:
                date_str = f"Yesterday, {job_date.strftime('%I:%M %p')}"
            elif job_date.year == today.year:
                date_str = job_date.strftime('%b %d, %I:%M %p')
            else:
                date_str = job_date.strftime('%b %d, %Y')

            # Determine if analysis has viewable results
            has_results = (job.status == 'completed' and
                          job.results is not None)

            # Get case information if available
            case_info = None
            if job.case:
                case_info = {
                    'id': job.case.id,
                    'name': job.case.case_name,
                    'number': job.case.case_number
                }

            # Determine analysis type
            analysis_type = "Image Analysis" if is_image_file(job.original_filename) else "Document Analysis"

            formatted_analyses.append({
                "job_id": job.job_uuid,
                "filename": job.original_filename,
                "status": job.status,
                "perspective": job.perspective,
                "created_at": date_str,
                "analysis_type": analysis_type,
                "case": case_info,
                "has_results": has_results,
                "user": job.user.username if job.user else "Unknown"
            })

        # 🔥 FIXED: Format summaries for display
        formatted_summaries = []
        for summary in recent_summaries:
            summary_date = summary.created_at

            if summary_date.date() == today:
                date_str = f"Today, {summary_date.strftime('%I:%M %p')}"
            elif summary_date.date() == yesterday:
                date_str = f"Yesterday, {summary_date.strftime('%I:%M %p')}"
            elif summary_date.year == today.year:
                date_str = summary_date.strftime('%b %d, %I:%M %p')
            else:
                date_str = summary_date.strftime('%b %d, %Y')

            # Get case information if available
            case_info = None
            if summary.case:
                case_info = {
                    'id': summary.case.id,
                    'name': summary.case.case_name,
                    'number': summary.case.case_number
                }

            formatted_summaries.append({
                "job_id": summary.job_uuid,
                "filename": summary.original_filename,
                "status": summary.status,
                "summary_type": summary.summary_type,
                "summary_length": summary.summary_length,
                "created_at": date_str,
                "case": case_info,
                "user": summary.user.username if summary.user else "Unknown"
            })

        # Get recent cases and case statistics
        recent_cases = []
        active_cases_count = 0
        if current_user.company_id:
            recent_cases = Case.query.filter_by(company_id=current_user.company_id).order_by(Case.created_at.desc()).limit(5).all()
            # Count active cases for the current user
            active_cases_count = Case.query.filter_by(
                company_id=current_user.company_id,
                created_by_id=current_user.id,
                status='active'
            ).count()

        # Add case count to stats
        stats['active_cases'] = active_cases_count

        # Get activities
        if current_user.is_admin():
            activities = ActivityLog.query.order_by(ActivityLog.created_at.desc()).limit(10).all()
        elif current_user.is_company_admin():
            activities = ActivityLog.query.filter_by(company_id=current_user.company_id).order_by(ActivityLog.created_at.desc()).limit(10).all()
        else:
            activities = ActivityLog.query.filter_by(user_id=current_user.id).order_by(ActivityLog.created_at.desc()).limit(10).all()

        # Format activities
        formatted_activities = []
        for activity in activities:
            activity_date = activity.created_at

            if activity_date.date() == today:
                date_str = f"Today, {activity_date.strftime('%I:%M %p')}"
            elif activity_date.date() == yesterday:
                date_str = f"Yesterday, {activity_date.strftime('%I:%M %p')}"
            elif activity_date.year == today.year:
                date_str = activity_date.strftime('%b %d, %I:%M %p')
            else:
                date_str = activity_date.strftime('%b %d, %Y')

            formatted_activities.append({
                "action": activity.activity_type,  # 🔥 FIXED: Use activity_type instead of action
                "description": activity.description,
                "date": date_str,
                "user": activity.user.username if activity.user else "System"
            })

        return render_template('dashboard.html',
                              stats=stats,
                              recent_analyses=formatted_analyses,
                              recent_summaries=formatted_summaries,  # 🔥 THIS IS THE KEY ADDITION
                              recent_cases=recent_cases,
                              activities=formatted_activities,
                              user=current_user)

    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        print(f"Error displaying dashboard: {str(e)}\n{error_details}")

        # Return a basic dashboard with minimal data
        user_id = session.get('user_id')
        current_user = db.session.get(User, user_id) if user_id else None

        default_stats = {
            "total_analyses": 0,
            "completed": 0,
            "in_progress": 0,
            "failed": 0,
            "today": 0,
            "image_analyses": 0,
            "document_analyses": 0
        }

        return render_template('dashboard.html',
                              stats=default_stats,
                              recent_analyses=[],
                              recent_summaries=[],  # 🔥 ALSO ADDED HERE
                              recent_cases=[],
                              activities=[],
                              user=current_user,
                              error_message="Dashboard data temporarily unavailable")

# app.py - FIXED analysis route
@app.route('/analysis/<job_id>')
@login_required
def view_analysis(job_id):
    """Display analysis results or processing page"""
    user_id = session.get('user_id')
    user = db.session.get(User, user_id)

    job = AnalysisJob.query.filter_by(job_uuid=job_id).first_or_404()

    # Check permissions (fixed to include company_admin)
    if job.user_id != user_id and not user.is_admin() and not (user.is_company_admin() and job.company_id == user.company_id):
        flash('You do not have permission to view this analysis', 'danger')
        return redirect(url_for('dashboard'))

    print(f"Analysis route for job {job_id}: status={job.status}, has_results={job.results is not None}")  # Debug

    # FIXED: Show processing page if still processing
    if job.status == 'processing' or job.status == 'pending':
        print(f"Showing processing page for job {job_id}")  # Debug
        return render_template('processing.html', job=job, job_id=job_id)

    # FIXED: Handle failed analysis with retry options
    elif job.status == 'error' or job.status == 'failed':
        # Check if it's a large document
        is_large_document = False
        if job.file_path and os.path.exists(job.file_path):
            file_size = os.path.getsize(job.file_path)
            is_large_document = file_size > 10 * 1024 * 1024  # 10MB
        
        return render_template('error_with_retry.html',
                             job_type='analysis',
                             job=job,
                             error_message=job.error_message or "Document analysis failed. This may be due to document size or complexity.",
                             error_type='Processing Error',
                             is_large_document=is_large_document,
                             now=datetime.utcnow())

    # FIXED: Only check for completed status here
    elif job.status == "completed":
        # Get the results
        result = job.results

        if not result:
            flash('Analysis results not found. Please try re-analyzing the document.', 'warning')
            return redirect(url_for('dashboard'))

        # Check if we have arguments generated
        has_arguments = result and result.arguments is not None
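        # result is the related AnalysisResult row; final_analysis, section_analyses_json and
        # citations_json are assumed to deserialize into the strings/lists that analysis.html
        # iterates over when rendering the completed report.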

        # Determine analysis type for template
        analysis_type = "Image" if is_image_file(job.original_filename) else "Document"

        # Format job data for template
        formatted_job = {
            "job_id": job.job_uuid,
            "filename": job.original_filename,
            "perspective": job.perspective,
            "status": job.status,
            "analysis_type": analysis_type,
            "case": job.case
        }

        print(f"Showing completed analysis for job {job_id}")  # Debug

        # Show the completed analysis
        return render_template('analysis.html',
                              job=formatted_job,
                              job_id=job_id,
                              final_analysis=result.final_analysis,
                              section_analyses=result.section_analyses_json,
                              citations=result.citations_json,
                              has_arguments=has_arguments)

    else:
        # Unknown status
        flash(f'Unknown analysis status: {job.status}', 'warning')
        return redirect(url_for('dashboard'))

@app.route('/reanalyze/<job_id>/<perspective>')
@login_required
def reanalyze(job_id, perspective):
    """Re-analyze with different perspective"""
    if perspective not in ['prosecutor', 'defense', 'neutral']:
        flash('Invalid perspective', 'danger')
        return redirect(url_for('view_analysis', job_id=job_id))

    user_id = session.get('user_id')
    user = db.session.get(User, user_id)
    original_job = AnalysisJob.query.filter_by(job_uuid=job_id).first_or_404()

    # 🔥 ENSURE USER HAS SUBSCRIPTION AND CAN ANALYZE
    subscription = ensure_user_subscription(user)
    if subscription and not subscription.can_analyze_document():
        flash('Monthly analysis limit reached. Please upgrade your plan for unlimited analyses.', 'warning')
        return redirect(url_for('subscription.view_plans'))

    # Create new job with same file but different perspective
    new_job_uuid = str(uuid.uuid4())

    # Determine analysis type
    analysis_type = "image" if is_image_file(original_job.original_filename) else "document"

    new_job = AnalysisJob(
        job_uuid=new_job_uuid,
        user_id=user_id,
        company_id=original_job.company_id,
        case_id=original_job.case_id,
        filename=original_job.filename,
        original_filename=original_job.original_filename,
        file_path=original_job.file_path,
        perspective=perspective,
        status="processing",
        additional_instructions=original_job.additional_instructions
    )

    db.session.add(new_job)
    db.session.commit()

    # Start analysis in background
    analysis_thread = threading.Thread(
        target=background_analysis,
        args=(new_job_uuid, original_job.file_path, perspective, analysis_type, original_job.additional_instructions)
    )
    analysis_thread.daemon = True
    analysis_thread.start()

    # 🔥 FIXED: Redirect to view_analysis instead of show_analysis
    return redirect(url_for('view_analysis', job_id=new_job_uuid))

@app.route('/compare/<job_id_1>/<job_id_2>', methods=['GET'])
@login_required
def compare_analyses(job_id_1, job_id_2):
    """Display two analyses side by side for comparison"""
    # Get both jobs
    job1 = AnalysisJob.query.filter_by(job_uuid=job_id_1).first()
    job2 = AnalysisJob.query.filter_by(job_uuid=job_id_2).first()

    if not job1 or not job2:
        flash('One or both analyses not found', 'danger')
        return redirect(url_for('dashboard'))

    # Check if user has permission to view these jobs
    user_id = session.get('user_id')
    user = db.session.get(User, user_id)

    # Check case access if jobs are associated with cases
    if job1.case and not user.can_view_case(job1.case):
        flash('You do not have permission to view one of these analyses', 'danger')
        return redirect(url_for('dashboard'))

    if job2.case and not user.can_view_case(job2.case):
        flash('You do not have permission to view one of these analyses', 'danger')
        return redirect(url_for('dashboard'))

    if not user.is_admin() and not user.is_company_admin():
        if job1.user_id != user_id or job2.user_id != user_id:
            flash('You do not have permission to view one or both of these analyses', 'danger')
            return redirect(url_for('dashboard'))

    # Check if both jobs are completed
    if job1.status != "completed" or job2.status != "completed":
        flash('Both analyses must be completed before comparing', 'warning')
        return redirect(url_for('dashboard'))

    # Get results for both jobs
    results1 = job1.results
    results2 = job2.results

    if not results1 or not results2:
        flash('Analysis results not found', 'danger')
        return redirect(url_for('dashboard'))

    # Determine analysis types
    analysis_type1 = "image" if is_image_file(job1.original_filename) else "document"
    analysis_type2 = "image" if is_image_file(job2.original_filename) else "document"

    # Format job data for template
    formatted_job1 = {
        "job_id": job1.job_uuid,
        "filename": job1.original_filename,
        "perspective": job1.perspective,
        "status": job1.status,
        "analysis_type": analysis_type1,
        "case": job1.case
    }

    formatted_job2 = {
        "job_id": job2.job_uuid,
        "filename": job2.original_filename,
        "perspective": job2.perspective,
        "status": job2.status,
        "analysis_type": analysis_type2,
        "case": job2.case
    }

    # Combine unique citations from both analyses
    all_citations = {}

    for citation in results1.citations_json:
        citation_text = citation.get('text', '')
        if citation_text:
            all_citations[citation_text] = citation

    for citation in results2.citations_json:
        citation_text = citation.get('text', '')
        if citation_text and citation_text not in all_citations:
            all_citations[citation_text] = citation

    combined_citations = list(all_citations.values())
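    # Keying all_citations by citation text de-duplicates citations shared by both analyses:
    # job1's entries are inserted first and win on conflict, and insertion order is preserved
    # (Python 3.7+ dict semantics), so combined_citations lists job1's citations before job2's.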

    # Log the activity
    log_activity(
        user_id,
        'analysis_comparison',
        f"Compared analyses: {job1.original_filename} ({job1.perspective}) and {job2.original_filename} ({job2.perspective})",
        'analysis_job',
        job1.id
    )

    # Render the comparison template with both analyses
    return render_template('compare.html',
                          job1=formatted_job1,
                          job2=formatted_job2,
                          final_analysis1=results1.final_analysis,
                          final_analysis2=results2.final_analysis,
                          citations=combined_citations,
                          current_user=user)

@app.route('/upload', methods=['POST'])
@csrf.exempt
@login_required
def upload_file():
    """Handle file upload and start analysis - FIXED to handle existing case documents"""
    try:
        # Get form data first
        perspective = request.form.get('perspective', 'neutral')
        additional_instructions = request.form.get('additional_instructions', '')
        practice_area = request.form.get('practice_area', 'general')
        analysis_type_param = request.form.get('analysis_type', 'general_analysis')
        case_id = request.form.get('case_id')
        document_id = request.form.get('document_id')  # This is key for existing documents
        retry_file_path = request.form.get('retry_file_path')  # For retry from error page

        # Validate perspective
        valid_perspectives = ['prosecutor', 'defense', 'neutral']
        if perspective not in valid_perspectives:
            return jsonify({'error': 'Invalid perspective'}), 400

        # Get current user
        user_id = session.get('user_id')
        user = db.session.get(User, user_id)

        # Check usage limits
        subscription = ensure_user_subscription(user)
        if subscription and not subscription.can_analyze_document():
            return jsonify({
                'error': 'Monthly analysis limit reached',
                'message': 'Please upgrade your plan or purchase a single analysis',
                'show_options': True
            }), 429

        # Handle retry from error page
        if retry_file_path:
            print(f"🔄 Retrying analysis with existing file: {retry_file_path}")
            if os.path.exists(retry_file_path):
                file_path = retry_file_path
                original_filename = request.form.get('original_filename', 'document.pdf')
                # FIX: also set unique_filename so the AnalysisJob created below has a stored filename
                unique_filename = os.path.basename(retry_file_path)
            else:
                return jsonify({'error': 'Retry file not found'}), 404
        # ✅ FIXED: Handle existing case document
        elif document_id:
            print(f"🔄 Analyzing existing case document ID: {document_id}")
            
            # Get the existing document
            existing_doc = db.session.get(CaseDocument, document_id)
            if not existing_doc:
                return jsonify({'error': 'Document not found'}), 404
            
            # Verify user can access this document
            if existing_doc.case and not user.can_view_case(existing_doc.case):
                return jsonify({'error': 'Access denied'}), 403
            
            # Check if file still exists
            if not existing_doc.file_path or not os.path.exists(existing_doc.file_path):
                return jsonify({'error': 'Document file not found on server'}), 404
            
            # Use existing document info
            file_path = existing_doc.file_path
            original_filename = existing_doc.original_filename
            unique_filename = existing_doc.filename
            
            print(f"✅ Using existing document: {original_filename}")
            
        else:
            # ✅ Handle new file upload (original logic)
            if 'file' not in request.files:
                return jsonify({'error': 'No file part'}), 400

            file = request.files['file']
            if file.filename == '':
                return jsonify({'error': 'No selected file'}), 400

            if not allowed_file(file.filename):
                return jsonify({'error': 'File type not allowed'}), 400

            # Generate unique filename and save file
            original_filename = secure_filename(file.filename)
            file_extension = original_filename.rsplit('.', 1)[1].lower()
            unique_filename = f"{user_id}_{int(time.time())}_{uuid.uuid4().hex[:8]}.{file_extension}"
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], unique_filename)

            # Save the uploaded file
            file.save(file_path)
            print(f"✅ New file uploaded: {original_filename}")

        # Determine analysis type
        analysis_type = "image" if is_image_file(original_filename) else "document"

        # Create analysis job
        job_uuid = str(uuid.uuid4())
        job = AnalysisJob(
            job_uuid=job_uuid,
            user_id=user_id,
            company_id=user.company_id,
            case_id=case_id if case_id else None,
            filename=unique_filename,
            original_filename=original_filename,
            file_path=file_path,
            perspective=perspective,
            practice_area=practice_area,
            analysis_type=analysis_type_param,
            status="processing",
            additional_instructions=additional_instructions
        )

        db.session.add(job)
        db.session.commit()

        # Start analysis in background thread
        analysis_thread = threading.Thread(
            target=background_analysis,
            args=(job_uuid, file_path, perspective, analysis_type, additional_instructions)
        )
        analysis_thread.daemon = True
        analysis_thread.start()
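        # NOTE: background_analysis() (defined elsewhere in this module) runs in a plain daemon
        # thread, so in-flight work is lost if the worker process restarts; jobs stuck in
        # "processing" can be recovered via the /debug/restart/<job_id> route below.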

        # Log the activity
        action_desc = f'Started {perspective} {analysis_type} analysis'
        if document_id:
            action_desc += f' of existing document {original_filename}'
        else:
            action_desc += f' of {original_filename}'
            
        log_activity(
            user_id,
            'analysis_started',
            action_desc,
            'analysis_job',
            job.id
        )

        return jsonify({
            'success': True,
            'job_id': job_uuid,
            'message': 'Analysis started successfully'
        })

    except Exception as e:
        print(f"Upload error: {e}")
        # Clean up the file only if it was newly uploaded in this request (never delete an
        # existing case document or a retry file that may still be needed)
        if 'file_path' in locals() and not document_id and not retry_file_path and os.path.exists(file_path):
            os.remove(file_path)

        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

# 🔥 FIXED: Remove duplicate route and fix the existing one
@app.route('/show_analysis/<job_id>')
@login_required
def show_analysis(job_id):
    """Redirect to the main analysis view - FIXED"""
    return redirect(url_for('view_analysis', job_id=job_id))

@app.route('/job_status/<job_id>')
@csrf.exempt
@login_required
def job_status(job_id):
    """Check the status of an analysis job"""
    job = AnalysisJob.query.filter_by(job_uuid=job_id).first()

    if not job:
        return jsonify({"error": "Job not found"}), 404

    # Check permissions
    user_id = session.get('user_id')
    user = db.session.get(User, user_id)

    if job.user_id != user_id and not user.is_admin() and not (user.is_company_admin() and job.company_id == user.company_id):
        return jsonify({"error": "Permission denied"}), 403

    # Check if job is completed and has results
    if job.status == "completed" and job.results:
        return jsonify({
            "status": job.status,
            "completed": True,
            "results_url": url_for('view_analysis', job_id=job_id)
        })
    elif job.status == "error":
        return jsonify({
            "status": job.status,
            "completed": True,
            "error": job.error_message or "Analysis failed"
        })
    else:
        return jsonify({
            "status": job.status,
            "completed": False
        })

@app.route('/my-analyses')
@login_required
def my_analyses():
    return redirect(url_for('dashboard') + '#recent-activity')

@app.route('/profile', methods=['GET', 'POST'])
@login_required
def user_profile():
    """User profile page"""
    user_id = session.get('user_id')
    user = db.session.get(User, user_id)

    if request.method == 'POST':
        # Update user profile
        first_name = request.form.get('first_name')
        last_name = request.form.get('last_name')

        # Update password if provided
        password = request.form.get('password')
        if password:
            confirm_password = request.form.get('confirm_password')
            if password != confirm_password:
                flash('Passwords do not match', 'danger')
                return render_template('profile.html', user=user)

            user.set_password(password)

        # Update user
        user.first_name = first_name
        user.last_name = last_name

        db.session.commit()

        # Log the activity
        log_activity(
            user_id,
            'profile_update',
            f"User updated their profile"
        )

        flash('Profile updated successfully', 'success')
        return redirect(url_for('user_profile'))

    return render_template('profile.html', user=user)

@app.route('/faq')
def faq():
    """FAQ page - publicly accessible"""
    import markdown
    # Read the FAQ markdown file; fall back gracefully if FAQ.md is missing
    faq_path = os.path.join(app.root_path, 'FAQ.md')
    try:
        with open(faq_path, 'r') as f:
            faq_markdown = f.read()
    except OSError:
        return render_template('faq.html', faq_content='<p>The FAQ is temporarily unavailable.</p>')

    # Convert markdown to HTML
    faq_html = markdown.markdown(faq_markdown, extensions=['tables', 'fenced_code'])

    return render_template('faq.html', faq_content=faq_html)

@app.route('/bar-compliance')
def bar_compliance():
    """Bar Compliance Statement - publicly accessible"""
    return render_template('bar_compliance.html')

@app.route('/contact', methods=['GET', 'POST'])
def contact():
    """Contact form with anti-spam and security features"""
    if request.method == 'GET':
        return render_template('contact.html')

    # POST request - handle form submission
    is_ajax = request.headers.get('X-Requested-With') == 'XMLHttpRequest'

    # Anti-spam: Check honeypot field
    honeypot = request.form.get('website', '').strip()
    if honeypot:
        # Spam detected - fail silently
        if is_ajax:
            return jsonify({'success': True, 'message': 'Message sent successfully!'})
        flash('Message sent successfully!', 'success')
        return redirect(url_for('contact'))

    # Anti-spam: Check timestamp (minimum 3 seconds since page load)
    try:
        timestamp = int(request.form.get('timestamp', 0))
        time_elapsed = int(time.time() * 1000) - timestamp
        if time_elapsed < 3000:  # Less than 3 seconds
            if is_ajax:
                return jsonify({'success': False, 'message': 'Please take a moment to review your message.'})
            flash('Please take a moment to review your message.', 'warning')
            return redirect(url_for('contact'))
    except (ValueError, TypeError):
        pass

    # Validate required fields
    name = request.form.get('name', '').strip()
    email = request.form.get('email', '').strip()
    subject = request.form.get('subject', '').strip()
    message = request.form.get('message', '').strip()
    phone = request.form.get('phone', '').strip()

    # Validation
    if not all([name, email, subject, message]):
        if is_ajax:
            return jsonify({'success': False, 'message': 'All required fields must be filled out.'})
        flash('All required fields must be filled out.', 'danger')
        return redirect(url_for('contact'))

    if len(message) < 10:
        if is_ajax:
            return jsonify({'success': False, 'message': 'Message must be at least 10 characters long.'})
        flash('Message must be at least 10 characters long.', 'danger')
        return redirect(url_for('contact'))

    # Email validation
    import re
    email_regex = r'^[^\s@]+@[^\s@]+\.[^\s@]+$'
    if not re.match(email_regex, email):
        if is_ajax:
            return jsonify({'success': False, 'message': 'Please enter a valid email address.'})
        flash('Please enter a valid email address.', 'danger')
        return redirect(url_for('contact'))

    # Send email via SMTP to spacy.covereddata.com
    try:
        import smtplib
        from email.mime.text import MIMEText
        from email.mime.multipart import MIMEMultipart

        # Create email message (strip CR/LF from user-supplied values to avoid header injection)
        safe_name = name.replace('\r', ' ').replace('\n', ' ')
        safe_subject = subject.replace('\r', ' ').replace('\n', ' ')
        msg = MIMEMultipart('alternative')
        msg['From'] = f"{safe_name} <noreply@epolaw.ai>"
        msg['To'] = "support@epolaw.ai"
        msg['Subject'] = f"[EPOLaw Contact Form] {safe_subject}"
        msg['Reply-To'] = email

        # Create HTML email body
        html_body = f"""
        <html>
        <body style="font-family: Arial, sans-serif; line-height: 1.6; color: #333;">
            <div style="max-width: 600px; margin: 0 auto; padding: 20px; background: #f9f9f9; border-radius: 10px;">
                <h2 style="color: #667eea; border-bottom: 3px solid #667eea; padding-bottom: 10px;">
                    New Contact Form Submission
                </h2>

                <div style="background: white; padding: 20px; border-radius: 5px; margin: 20px 0;">
                    <h3 style="color: #764ba2; margin-top: 0;">Contact Information</h3>
                    <table style="width: 100%; border-collapse: collapse;">
                        <tr>
                            <td style="padding: 8px 0; font-weight: bold; width: 120px;">Name:</td>
                            <td style="padding: 8px 0;">{name}</td>
                        </tr>
                        <tr>
                            <td style="padding: 8px 0; font-weight: bold;">Email:</td>
                            <td style="padding: 8px 0;"><a href="mailto:{email}">{email}</a></td>
                        </tr>
                        <tr>
                            <td style="padding: 8px 0; font-weight: bold;">Phone:</td>
                            <td style="padding: 8px 0;">{phone if phone else 'Not provided'}</td>
                        </tr>
                        <tr>
                            <td style="padding: 8px 0; font-weight: bold;">Subject:</td>
                            <td style="padding: 8px 0;">{subject}</td>
                        </tr>
                    </table>
                </div>

                <div style="background: white; padding: 20px; border-radius: 5px; margin: 20px 0;">
                    <h3 style="color: #764ba2; margin-top: 0;">Message</h3>
                    <p style="white-space: pre-wrap;">{message}</p>
                </div>

                <div style="background: #f0f0f0; padding: 15px; border-radius: 5px; margin: 20px 0; font-size: 0.9em; color: #666;">
                    <h4 style="margin-top: 0; color: #666;">Submission Details</h4>
                    <p style="margin: 5px 0;">
                        <strong>Timestamp:</strong> {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}<br>
                        <strong>IP Address:</strong> {request.remote_addr}<br>
                        <strong>User Agent:</strong> {request.headers.get('User-Agent', 'Unknown')}<br>
                        <strong>Source:</strong> EPOLaw Contact Form
                    </p>
                </div>
            </div>
        </body>
        </html>
        """

        # Create plain text alternative
        text_body = f"""
EPOLaw Contact Form Submission

Contact Information:
--------------------
Name: {name}
Email: {email}
Phone: {phone if phone else 'Not provided'}
Subject: {subject}

Message:
--------
{message}

Submission Details:
------------------
Timestamp: {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}
IP Address: {request.remote_addr}
User Agent: {request.headers.get('User-Agent', 'Unknown')}
Source: EPOLaw Contact Form
        """

        # Attach both HTML and plain text versions
        part1 = MIMEText(text_body, 'plain')
        part2 = MIMEText(html_body, 'html')
        msg.attach(part1)
        msg.attach(part2)

        # Connect to SMTP server and send email
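        # NOTE: this assumes the relay at spacy.covereddata.com accepts unauthenticated mail
        # from this host on port 587; if the relay enforces STARTTLS or SMTP AUTH, add
        # server.starttls() and server.login(...) before the sendmail() call.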
        with smtplib.SMTP('spacy.covereddata.com', 587) as server:
            server.sendmail('noreply@epolaw.ai', 'support@epolaw.ai', msg.as_string())

        # Log successful contact form submission
        if session.get('user_id'):
            log_activity(
                session.get('user_id'),
                'contact_form',
                f"User {name} submitted contact form: {subject}"
            )

        if is_ajax:
            return jsonify({'success': True, 'message': 'Thank you for contacting us! We will respond within 24 hours.'})
        flash('Thank you for contacting us! We will respond within 24 hours.', 'success')
        return redirect(url_for('contact'))

    except Exception as e:
        # Log the error
        print(f"Contact form email error: {str(e)}")

        if is_ajax:
            return jsonify({
                'success': False,
                'message': 'We are experiencing technical difficulties. Please call us at 702.605.4997.'
            })
        flash('We are experiencing technical difficulties. Please call us at 702.605.4997.', 'warning')
        return redirect(url_for('contact'))

# ===== ENHANCED LEGAL RESEARCH API ROUTES =====

@app.route('/api/recent_analyses', methods=['GET'])
@login_required
def api_recent_analyses():
    """API endpoint to get recent completed analyses for comparison"""
    user_id = session.get('user_id')
    user = db.session.get(User, user_id)

    # Get user's completed analyses (last 50)
    if user.is_admin():
        analyses = AnalysisJob.query.filter_by(status='completed').order_by(
            AnalysisJob.created_at.desc()
        ).limit(50).all()
    elif user.is_company_admin():
        analyses = AnalysisJob.query.filter_by(
            company_id=user.company_id,
            status='completed'
        ).order_by(AnalysisJob.created_at.desc()).limit(50).all()
    else:
        analyses = AnalysisJob.query.filter_by(
            user_id=user_id,
            status='completed'
        ).order_by(AnalysisJob.created_at.desc()).limit(50).all()

    # Format for JSON response
    analyses_data = []
    for analysis in analyses:
        analyses_data.append({
            'job_uuid': analysis.job_uuid,
            'filename': analysis.filename,
            'original_filename': analysis.original_filename,
            'perspective': analysis.perspective,
            'created_at': analysis.created_at.isoformat() if analysis.created_at else None,
            'case_name': analysis.case.case_name if analysis.case else None,
            'case_id': analysis.case_id
        })

    return jsonify({
        'success': True,
        'analyses': analyses_data
    })

@app.route('/api/enhanced-legal-search', methods=['POST'])
@csrf.exempt
@login_required
def enhanced_legal_search():
    """Enhanced legal search using CourtListener + Claude AI"""
    try:
        data = request.get_json()
        query = data.get('query', '').strip()
        
        if not query:
            return jsonify({'success': False, 'error': 'Query required'}), 400

        user_id = session.get('user_id')
        
        # Log the search activity
        log_activity(
            user_id,
            'enhanced_legal_search',
            f'Enhanced legal search for: {query}'
        )

        print(f"🔍 Enhanced legal search started for: {query}")

        results = {
            'courtlistener_results': [],
            'ai_analysis': '',
            'success': True,
            'query': query
        }
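        # search_courtlistener_api() and search_with_claude_ai() are helpers assumed to be
        # defined elsewhere in this module; each branch below degrades gracefully so a failure
        # in one source still returns partial results to the client.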
        
        # 1. Search CourtListener (reuse existing logic)
        try:
            print("📚 Searching CourtListener...")
            courtlistener_results = search_courtlistener_api(query)
            results['courtlistener_results'] = courtlistener_results
            print(f"✅ CourtListener search completed: {len(courtlistener_results)} results")
        except Exception as e:
            print(f"❌ CourtListener search error: {e}")
        
        # 2. Get Claude AI analysis
        try:
            print("🤖 Starting Claude AI analysis...")
            claude_analysis = search_with_claude_ai(query)
            results['ai_analysis'] = claude_analysis
            print("✅ Claude AI analysis completed")
        except Exception as e:
            print(f"❌ Claude AI analysis error: {e}")
            results['ai_analysis'] = f"AI analysis temporarily unavailable: {str(e)}"
        
        print(f"🎉 Enhanced search completed for: {query}")
        return jsonify(results)
        
    except Exception as e:
        print(f"❌ Enhanced search error: {str(e)}")
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

# ===== COURTLISTENER INTEGRATION ROUTES =====

@app.route('/api/courtlistener-search', methods=['POST'])
@csrf.exempt
@login_required
def courtlistener_search():
    """CourtListener search with enhanced synopsis extraction and CloudFront compatibility"""
    try:
        data = request.get_json()
        query = data.get('query', '').strip()

        if not query:
            return jsonify({'success': False, 'error': 'Query required'}), 400

        api_token = os.environ.get('COURTLISTENER_API_TOKEN')
        if not api_token:
            return jsonify({'success': False, 'error': 'API token not configured'}), 500

        # ✅ ENHANCED HEADERS - Critical for CloudFront
        headers = {
            'Authorization': f'Token {api_token}',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Cache-Control': 'max-age=0'
        }

        search_params = {
            'q': query,
            'type': 'o',  # Opinions
            'format': 'json'  # Explicitly request JSON
        }
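        # Roughly equivalent to the following request (illustrative only):
        #   curl -H "Authorization: Token $COURTLISTENER_API_TOKEN" \
        #        "https://www.courtlistener.com/api/rest/v4/search/?q=<query>&type=o&format=json"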

        print(f"🔍 Searching CourtListener with query: {query}")
        print(f"🔗 API Token present: {'Yes' if api_token else 'No'}")

        # ✅ ENHANCED REQUEST with SSL and session handling
        # (use a dedicated requests session named http_session so it does not shadow Flask's `session`)
        http_session = requests.Session()
        http_session.headers.update(headers)

        try:
            response = http_session.get(
                'https://www.courtlistener.com/api/rest/v4/search/',
                params=search_params,
                timeout=30,  # Increased timeout
                verify=True,  # SSL verification
                allow_redirects=True,
                stream=False
            )
            
            print(f"📡 Response status: {response.status_code}")
            print(f"📡 Response headers: {dict(response.headers)}")
            
            # ✅ Handle different response codes
            if response.status_code == 403:
                print(f"❌ 403 Forbidden - Check API token or rate limits")
                return jsonify({
                    'success': False,
                    'error': 'Access forbidden. Please check your API token or you may have exceeded rate limits.',
                    'status_code': 403
                }), 403
                
            elif response.status_code == 401:
                print(f"❌ 401 Unauthorized - Invalid API token")
                return jsonify({
                    'success': False,
                    'error': 'Invalid API token. Please check your COURTLISTENER_API_TOKEN.',
                    'status_code': 401
                }), 401
                
            elif response.status_code == 429:
                print(f"❌ 429 Rate Limited")
                return jsonify({
                    'success': False,
                    'error': 'Rate limit exceeded. Please try again later.',
                    'status_code': 429
                }), 429
                
            elif response.status_code != 200:
                print(f"❌ Unexpected status: {response.status_code}")
                return jsonify({
                    'success': False,
                    'error': f'API returned status {response.status_code}',
                    'status_code': response.status_code
                }), response.status_code

        except requests.exceptions.SSLError as e:
            print(f"❌ SSL Error: {e}")
            return jsonify({
                'success': False,
                'error': 'SSL connection error. Please try again.',
                'details': str(e)
            }), 503
            
        except requests.exceptions.ConnectionError as e:
            print(f"❌ Connection error: {e}")
            return jsonify({
                'success': False,
                'error': 'Unable to connect to CourtListener. Please check your internet connection.',
                'details': str(e)
            }), 503
            
        except requests.exceptions.Timeout:
            print(f"❌ Request timed out")
            return jsonify({
                'success': False,
                'error': 'Request timed out. CourtListener may be temporarily unavailable.'
            }), 504
            
        except requests.exceptions.RequestException as e:
            print(f"❌ Request error: {e}")
            return jsonify({
                'success': False,
                'error': 'Failed to search CourtListener',
                'details': str(e)
            }), 500

        # ✅ PROCESS SUCCESSFUL RESPONSE
        try:
            api_data = response.json()
        except ValueError as e:
            print(f"❌ JSON decode error: {e}")
            print(f"Response content: {response.text[:500]}")
            return jsonify({
                'success': False,
                'error': 'Invalid response format from CourtListener'
            }), 502

        print(f"✅ API response received: {api_data.get('count', 0)} results")

        results = []
        for i, item in enumerate(api_data.get('results', [])[:10]):
            try:
                print(f"\n--- Processing result {i+1} ---")

                case_name = extract_case_name(item)
                citation = extract_citation(item)
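                # extract_case_name(), extract_citation() and extract_case_synopsis() are parsing
                # helpers assumed to be defined elsewhere in this module; they normalize the
                # loosely structured fields returned by the v4 search API.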

                # Extract court
                court = 'Unknown Court'
                if 'court' in item and item['court']:
                    if isinstance(item['court'], dict):
                        court = item['court'].get('name', item['court'].get('short_name', 'Unknown Court'))
                    else:
                        court = str(item['court'])

                # Extract date
                date_filed = item.get('dateFiled', item.get('date_filed', 'Date not available'))

                # Extract synopsis/summary from multiple possible fields
                synopsis = extract_case_synopsis(item)

                # Build URL
                absolute_url = ''
                if 'absolute_url' in item and item['absolute_url']:
                    absolute_url = item['absolute_url']
                    if not absolute_url.startswith('http'):
                        absolute_url = f"https://www.courtlistener.com{absolute_url}"

                result_item = {
                    'case_name': case_name,
                    'citation': citation,
                    'court': court,
                    'date': date_filed,
                    'snippet': synopsis,
                    'url': absolute_url,
                    'relevance_score': item.get('score', 0)
                }

                print(f"Result {i+1}: {result_item['case_name']} | {result_item['citation']}")
                print(f"Synopsis length: {len(synopsis)} chars")
                results.append(result_item)

            except Exception as item_error:
                print(f"Error processing result {i+1}: {str(item_error)}")
                continue

        return jsonify({
            'success': True,
            'query': query,
            'results_found': api_data.get('count', 0),
            'results': results,
            'message': f'Found {len(results)} cases for "{query}"'
        })

    except Exception as e:
        print(f"❌ Search error: {str(e)}")
        import traceback
        traceback.print_exc()
        return jsonify({
            'success': False,
            'error': str(e)
        }), 500

# Debug routes for troubleshooting
@app.route('/debug/job/<job_id>')
@login_required
def debug_job(job_id):
    """Debug route to check job status and restart if needed"""
    job = AnalysisJob.query.filter_by(job_uuid=job_id).first()

    if not job:
        return jsonify({"error": "Job not found"}), 404

    # Show current job details
    job_info = {
        "job_uuid": job.job_uuid,
        "status": job.status,
        "error_message": job.error_message,
        "created_at": str(job.created_at),
        "completed_at": str(job.completed_at) if job.completed_at else None,
        "file_path": job.file_path,
        "original_filename": job.original_filename,
        "perspective": job.perspective
    }

    return jsonify(job_info)

@app.route('/debug/restart/<job_id>', methods=['POST'])
@login_required
def restart_job(job_id):
    """Restart a stuck analysis job"""
    job = AnalysisJob.query.filter_by(job_uuid=job_id).first()

    if not job:
        return jsonify({"error": "Job not found"}), 404

    # Check if file still exists
    if not job.file_path or not os.path.exists(job.file_path):
        return jsonify({"error": "Original file no longer exists"}), 400

    # Reset job status
    job.status = "processing"
    job.error_message = None
    job.completed_at = None
    db.session.commit()

    # Determine analysis type
    analysis_type = "image" if is_image_file(job.original_filename) else "document"

    # Restart the background analysis
    analysis_thread = threading.Thread(
        target=background_analysis,
        args=(job_id, job.file_path, job.perspective, analysis_type, job.additional_instructions)
    )
    analysis_thread.daemon = True
    analysis_thread.start()

    return jsonify({"success": True, "message": "Job restarted"})

@app.route('/debug/error/<job_id>', methods=['POST'])
@login_required
def mark_job_failed(job_id):
    """Mark a stuck job as failed so user can try again"""
    job = AnalysisJob.query.filter_by(job_uuid=job_id).first()

    if not job:
        return jsonify({"error": "Job not found"}), 404

    job.status = "error"
    job.error_message = "Analysis timed out - please try again"
    job.completed_at = datetime.utcnow()
    db.session.commit()

    return jsonify({"success": True, "message": "Job marked as failed"})

# Subscription initialization functions
def initialize_subscription_plans():
    """Initialize default subscription plans if they don't exist"""
    if SubscriptionPlan.query.count() == 0:
        for plan_data in DEFAULT_PLANS:
            plan = SubscriptionPlan(
                name=plan_data['name'],
                display_name=plan_data['display_name'],
                price=plan_data['price'],
                stripe_price_id=plan_data.get('stripe_price_id'),
                features=plan_data['features'],
                is_active=True
            )
            db.session.add(plan)
        db.session.commit()
        print("✅ Default subscription plans initialized")

# Initialize database and subscription plans when app starts
def create_app():
    """Application factory function for production"""
    with app.app_context():
        # Initialize database tables
        db.create_all()

        # Initialize subscription plans
        initialize_subscription_plans()

        print("✅ Database initialized successfully")

    return app

# WSGI entry point for production (Gunicorn will use this)
application = create_app()
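# Example production invocation (adjust workers/bind address as needed):
#   gunicorn --workers 4 --bind 0.0.0.0:8000 app:application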

# For development only
if __name__ == '__main__':
    # Run the application in development mode
    app.run(host='127.0.0.1', port=5000, debug=True)
