from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from serp.models import Keyword, Groups, Competitors
from django.http import JsonResponse
import requests
import threading
import json
import os
from django.conf import settings
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from datetime import date
import time
import gc
# NOTE(review): hardcoded API credential — should be loaded from an
# environment variable or Django settings, not committed to source control.
API_KEY = "67d1a58f740bc0b8bfd018ac"
# Base endpoint for the ScrapingDog Google SERP API.
URL = "https://api.scrapingdog.com/google/"
# Max concurrent keyword lookups — presumably for a ThreadPoolExecutor
# elsewhere in this file; not referenced in this chunk. TODO confirm.
KEYWORD_THREAD_LIMIT = 5 
# RAPID_API_KEY = "f6d117df33msh3da512288c98c0cp17197ajsn973cb4ee4f5f"


# Number of INIT keywords pulled from the DB per processing batch.
BATCH_SIZE=10


def save_json_file(data, filename="data.json"):
    """
    Serialize *data* as pretty-printed JSON under Django's MEDIA_ROOT.

    Creates the parent directory if it does not exist yet and returns
    the full path of the file that was written.
    """
    target_path = os.path.join(settings.MEDIA_ROOT, filename)

    # Make sure the containing directory exists before opening the file.
    os.makedirs(os.path.dirname(target_path), exist_ok=True)

    with open(target_path, "w", encoding="utf-8") as fh:
        json.dump(data, fh, indent=4)

    return target_path


def get_latest_rank_trend(rank_history):
    """
    Compare the two most recent entries in rank_history and return the trend.

    Each entry is expected to be a dict with "year", "month", "date" and
    "rank" keys. Lower rank number is better, so a numeric drop is "up".

    Returns:
        "up", "down", or "same" based on the two latest dates;
        "NA" when the history has fewer than two entries or is malformed.
    """
    # Note: in the original code this docstring sat inside the ``try`` body,
    # where it was a plain no-op string statement rather than a docstring.
    try:
        if not isinstance(rank_history, list) or len(rank_history) < 2:
            return "NA"

        # Sort chronologically by (year, month, day) so the last two
        # elements are the most recent observations.
        sorted_history = sorted(
            rank_history, key=lambda x: (x["year"], x["month"], x["date"])
        )

        previous = sorted_history[-2]["rank"]
        current = sorted_history[-1]["rank"]

        # A numerically smaller rank means a better (higher) position.
        if current < previous:
            return "up"
        if current > previous:
            return "down"
        return "same"
    except (KeyError, TypeError) as e:
        # Malformed entries (missing keys, non-comparable values) yield
        # "no answer" instead of propagating to the caller.
        print(str(e))
        return "NA"


def remove_duplicate_ranks_by_date(rank_history):
    """
    Drop duplicate rank entries sharing the same (year, month, date).

    The last entry seen for a given calendar day wins; surviving entries
    keep the insertion order of each day's first occurrence.
    """
    deduped = {
        (item["year"], item["month"], item["date"]): item
        for item in rank_history
    }
    return list(deduped.values())



    
    
# @api_view(["GET"])
# @permission_classes((AllowAny,))

# def keywordRanker(request, sort_order):
#     """
#     Cron: Process keywords 10 at a time (status INIT) until all are done,
#     pausing 2 seconds between each keyword and batch.
#     """
#     print("🔥 Cron started")

#     while True:
#         keywords = list(Keyword.objects.filter(track_status="INIT")[:BATCH_SIZE])
#         if not keywords:
#             print("✅ No more INIT keywords.")
#             break

#         for keyword in keywords:
#             process_keyword(keyword)
#             time.sleep(2)  # small delay between keywords to respect API limits

#         del keywords
#         gc.collect()

#         print("⏸️ Sleeping 2 seconds before next batch...")
#         time.sleep(2)

#     return JsonResponse({"status": "true", "message": "All INIT keywords processed in batches."})


def process_keyword(keyword):
    """
    Run the tracking pipeline for a single Keyword row.

    Only rows whose track_status is "INIT" are processed. The row moves
    INIT -> SCHD while work is in progress, then COMP on success or FAIL
    on any error. Never raises: all failures are reported via print.
    """
    try:
        # Only pick up keywords that have not been claimed yet.
        if keyword.track_status != "INIT":
            return
        # Mark as scheduled so other workers skip this row.
        keyword.track_status = "SCHD"
        keyword.save()
        print(f"Processing keyword: {keyword.keyword}")
        # Simulate processing time (placeholder for the real SERP lookup).
        time.sleep(1)
        keyword.track_status = "COMP"
        keyword.save()
        print(f"Completed keyword: {keyword.keyword}")
    except Exception as e:
        print(f"Failed to process keyword {keyword.keyword}: {e}")
        # Best-effort FAIL marker: if this save() also raises (e.g. the DB
        # is down), the original code let the exception escape and left the
        # row stuck in SCHD — swallow and report instead.
        try:
            keyword.track_status = "FAIL"
            keyword.save()
        except Exception as save_err:
            print(f"Could not persist FAIL status for {keyword.keyword}: {save_err}")


def background_keyword_processor():
    """
    Drain all Keyword rows whose track_status is "INIT", in batches.

    Pulls up to BATCH_SIZE rows per query and hands each one to
    process_keyword(), sleeping briefly between keywords and between
    batches to stay under external API rate limits. Ends once no INIT
    rows remain or an unexpected error occurs.
    """
    print("Background thread started for keyword processing")
    try:
        while batch := list(Keyword.objects.filter(track_status="INIT")[:BATCH_SIZE]):
            for kw in batch:
                process_keyword(kw)
                time.sleep(2)  # small delay to respect API limits

            # Drop the processed batch before the next query to keep
            # memory flat on long runs.
            del batch
            gc.collect()
            time.sleep(2)  # pause before next batch
        print("No more INIT keywords. Background thread ending.")
    except Exception as e:
        print(f"Exception in background thread: {e}")
    print("Background thread finished processing all keywords")

@api_view(["GET"])
@permission_classes((AllowAny,))
def keywordRanker(request, sort_order):
    """
    Kick off background processing of all INIT keywords.

    Returns immediately; the work happens in a daemon thread so the HTTP
    request is not blocked. At most one worker thread runs at a time —
    the original code spawned a new thread on every request, letting
    concurrent workers race to claim the same INIT rows.
    """
    worker_name = "keyword-ranker-worker"

    # Only start a new worker if no previously started one is still alive.
    already_running = any(
        t.name == worker_name and t.is_alive() for t in threading.enumerate()
    )
    if not already_running:
        thread = threading.Thread(
            target=background_keyword_processor, name=worker_name
        )
        thread.daemon = True  # so it won't block server shutdown
        thread.start()

    return JsonResponse({"status": "started", "message": "Keyword processing started in background thread."})
