# views.py
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# from .models import ChatMessage
from django.contrib.auth.models import User
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
from datetime import timedelta, datetime, timezone
from time import time, sleep
from langchain_openai import ChatOpenAI
import os
import sys
# from langchain_community.vectorstores import FAISS
import io
from typing import List
from langchain.load.dump import dumps

import elasticsearch
# import uuid
from langchain_core.messages import (
    BaseMessage,
    message_to_dict,
    messages_from_dict,
)
from glob import glob
from math import floor
from langchain.prompts import ChatPromptTemplate, PromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import ElasticsearchStore
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.messages.human import HumanMessage
from langchain_core.messages.ai import AIMessage
import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch, NotFoundError
from collections import OrderedDict
from decouple import config
from openai import OpenAI
from PIL import Image
from io import BytesIO
from django.core.mail import send_mail
from langchain_core.messages.system import SystemMessage
from langchain_core.messages.chat import ChatMessage
sys.path.append(os.path.dirname(__file__))
# from index_attached import start_indexing
from langchain_core.documents import Document
from Loader_functions_utils import DocumentLoader
document_loader = DocumentLoader()
from langchain.docstore.document import Document as BaseDocument
import tiktoken
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from django.urls import reverse

import base64
import bcrypt

import openai
import LogUtils
logger = LogUtils.getRootLogger()

base_url = "https://scicdev.crm.dynamics.com"

# Azure AD application credentials. Loaded from the environment via python-decouple;
# the config key names below are assumed -- adjust them to match your .env file.
# Secrets must not be hard-coded in source.
client_id = config("DYNAMICS_CLIENT_ID")
client_secret = config("DYNAMICS_CLIENT_SECRET")
tenant_id = config("DYNAMICS_TENANT_ID")

# Get OAuth Token
def get_token(client_id, client_secret, tenant_id):
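    """Request an OAuth client-credentials token for the Dynamics CRM API.

    Returns the access token string, and raises an Exception when the token
    endpoint responds with a non-200 status.
    """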
    token_url = f"https://login.microsoftonline.com/{tenant_id}/oauth2/token"
    payload = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret,
        "resource": base_url,
    }
    response = requests.post(token_url, data=payload)
    if response.status_code == 200:
        return response.json().get("access_token")
    else:
        raise Exception(f"Failed to retrieve token: {response.text}")

def get_data_by_email_username_password(email=None, username=None, password=None, headers=None):
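    """Query CRM contact records filtered by any of email, username, and password.

    Builds an OData $filter from whichever fields are supplied and returns the
    parsed JSON response. (Filtering on a plaintext password field mirrors the
    existing CRM schema; it is not a recommended practice.)
    """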
    # Query the 'contacts' entity; use 'systemusers' instead if that matches your CRM schema
    url = f"{base_url}/api/data/v9.2/contacts"
    
    # Construct OData query filter based on available fields
    filter_query = []
    if email:
        filter_query.append(f"emailaddress1 eq '{email}'")
    if username:
        filter_query.append(f"adx_identity_username eq '{username}'")
    if password:
        filter_query.append(f"trit_tnapassword eq '{password}'")  # (note: storing passwords directly is not recommended)

    # Combine filters using "and" logic
    filter_expression = " and ".join(filter_query) if filter_query else ""

    # Apply filter if available
    if filter_expression:
        url += f"?$filter={filter_expression}"

    # Send GET request with the filter applied
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        return response.json()  # Return the data if successful
    else:
        raise Exception(f"Failed to fetch data: {response.status_code} - {response.text}")

def is_valid_email(email):
    # Allow only scic.com and riskeducation email addresses
    return 'scic.com' in email or 'riskeducation' in email

def update_password(record_id, new_password, headers, entity="contacts"):
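    """PATCH the password field of a single CRM record.

    Dynamics returns 204 No Content on success; any other status is raised
    as an Exception.
    """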
    # Endpoint to update a specific record
    url = f"https://scicdev.crm.dynamics.com/api/data/v9.2/{entity}({record_id})"
    
    # Data payload with the updated password field
    payload = {
        "trit_tnapassword": new_password  # Replace "password" with the actual field name in your schema
    }

    # Send PATCH request to update the record
    response = requests.patch(url, headers=headers, json=payload)
    if response.status_code == 204:
        print("Password updated successfully.")
    else:
        raise Exception(f"Failed to update password: {response.status_code} - {response.text}")


# logger.info('key v5%s'%config('SECRET_KEY'))
@csrf_exempt
def chatbot(request):
    name = request.session.get('name')  # Get 'name' from session
    email = request.session.get('email')  # Get 'email' from session

    if name:
        return render(request, "index.html", {'user_name': name,'mailid': email})
    else:
        return redirect("login")
    
@csrf_exempt
def login_view(request):
    if request.method == "POST":
        # Handle the login form submission
        username = request.POST["username"]
        password = request.POST["password"]
        email = request.POST["email"]
        # Never print or log raw credentials; record the username only
        logger.info("Login attempt for username %s", username)

        try:
            if is_valid_email(email):
                token = get_token(client_id, client_secret, tenant_id)
                headers = {
                    "Authorization": f"Bearer {token}",
                    "Content-Type": "application/json",
                }
                record = get_data_by_email_username_password(email=email, username=username, password=password, headers=headers)

                if len(record['value']) > 0:
                    request.session['name'] = username
                    request.session['email'] = email
                    logger.info(f"User {username} authenticated successfully. Redirecting to chatbot.")
                    return redirect(chatbot)  # Redirect to chatbot view
                else:
                    return render(request, "login.html", {"error": "Invalid credentials"})
            else:
                # Emails outside the allowed domains must not fall through to the chatbot
                return render(request, "login.html", {"error": "Invalid credentials"})

        except Exception as e:
            logger.exception('Login error: %s', e)
            return HttpResponse(f"An error occurred: {e}")

    # If the request is GET, render the login page
    return render(request, "login.html")

@csrf_exempt
def forgot_password_view(request):
    if request.method == "POST":
        username = request.POST.get("username")
        email = request.POST["email"]
       
        try:
            token = get_token(client_id, client_secret, tenant_id)
            headers = {
                "Authorization": f"Bearer {token}",
                "Content-Type": "application/json",
            }
            record = get_data_by_email_username_password(email=email, username=username,headers=headers)
            # print(json.dumps(record, indent=4)) 

        
            if len(record['value']) > 0:
                # Send the password-reset link by email
                message = "Click the link below to reset your password:\n\n"
                message += request.build_absolute_uri("/reset-password/")

                send_mail(
                    subject="Password Reset Request",
                    message=message,
                    from_email="tna@scic.com",
                    recipient_list=[email],
                    fail_silently=False,
                )

                return render(request, "forgot_password.html", {"message": "Password reset link sent to your email."})
            else:
                return render(request, "forgot_password.html", {"error": "User not found."})

        except Exception as e:
            return HttpResponse(f"An error occurred: {e}")

    return render(request, "forgot_password.html")

@csrf_exempt
def reset_password_view(request):
    if request.method == "POST":
        username = request.POST.get("username")
        new_password = request.POST.get("new_password")
        email = request.POST["email"]

        try:
            
            if is_valid_email(email):
                token = get_token(client_id, client_secret, tenant_id)
                headers = {
                    "Authorization": f"Bearer {token}",
                    "Content-Type": "application/json",
                }
                record = get_data_by_email_username_password(email=email, username=username,headers=headers)
                # print(json.dumps(record, indent=4))             
                if len(record['value']) > 0:
                    record_id = record["value"][0]["contactid"]
                    update_password(record_id, new_password, headers, entity="contacts")
                    return render(request, "login.html", {"message": "Password has been reset successfully."})
                else:
                    return HttpResponse("User not found.")
            else:
                return HttpResponse("Email address is not allowed to reset a password.")

        except Exception as e:
            return HttpResponse(f"An error occurred: {e}")

    return render(request, "reset_password.html")


@csrf_exempt
def signout(request):
    request.session.pop('name', None)  # pop() avoids a KeyError if the session key is missing
    request.session.pop('email', None)
    login_url = reverse('login')  # Ensure the 'login' view is correctly mapped in your URLs
    return JsonResponse({
        'message': 'Signout successful', 
        'redirect_url': login_url
    })

@csrf_exempt
def get_statuslist(request):
    if request.method == "POST":
        # Read the posted user details
        username = request.POST["username"]
        userId = request.POST["userid"]
        logger.info("username %s" % username)

        try:
            # Query Elasticsearch to find the user
            response = es.search(
                index='notifications',
                body={
                    "query": {
                        "bool": {
                            "must":[
                                {"term": {"username.keyword": username}},
                                {"term": {"user_id.keyword": userId}},
                            ]
                        }
                    }
                }
            )
            
            userlist=[]
            # Check if any user matched the query
            if response['hits']['total']['value'] > 0:
                for hit in response['hits']['hits']:
                    source = hit['_source']
                    if source.get("request", 0) ==2:
                        request_sta="Approved"
                    elif source.get("request", 0) ==3:
                        request_sta="Declined"
                    else:
                        request_sta="Pending"
                        
                    formatted_doc = {
                        "name": source.get("username"),
                        "user_id": source.get("user_id"),
                        "message": source.get("message"),
                        "created_at": source.get("created_at"),
                        "request": request_sta
                        
                    }
                    userlist.append(formatted_doc)
                    
            return JsonResponse({"userlist": userlist})

                    
        except Exception as e:
            return HttpResponse(f"An error occurred: {e}")

    # Non-POST requests are not supported by this endpoint
    return JsonResponse({"error": "POST required"}, status=405)
# Define the search query
def get_messages_by_user_and_session(username, ind_name, customer):
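    """Return all chat messages for a username/candidate pair from index ind_name,
    sorted oldest-first by lastmodified_at (up to 10,000 hits)."""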
    query = {
       "query": {
         "bool": {
           "must": [
             {"term": {"username.keyword": username}},
             {"term": {"candidate_name.keyword": customer}}
           ]
         }
       },
       "size":10000,
       "sort": [
           {"lastmodified_at": {"order": "asc"}}
       ]
    }
    # Execute the search query
    response = es.search(index=ind_name, body=query)

    # Extract and return the messages
    messages = [hit['_source'] for hit in response['hits']['hits']]
    return messages

def unique_sources_with_pages(meta_data):
    unique_sources = set()
    
    for item in meta_data:
        source = item.get('source', '') 
        filename = os.path.basename(source)  # Get the file name with extension
        name_without_ext = os.path.splitext(filename)[0]  
        unique_sources.add(name_without_ext)
    
    # Convert the set of unique sources to a comma-separated string
    unique_sources_str = ', '.join(unique_sources)
    
    return unique_sources_str  
    
os.environ["OPENAI_API_KEY"] = config('SECRET_KEY')
embeddings = OpenAIEmbeddings()
# es_index_name ='allibot_v2_gpt'
# chatname_index='get_chatid'
# imtext_index='textlens1'
# doc_index='documents_up'

es_index_name = config('es_index_name')
chatname_index = config('chatname_index')
imtext_index = config('textlensindex')
doc_index = config('compareindex')
forms_index = config('formsindex')
org_details_index = config('riea_org_index')
user_details_index = config('riea_token_index')


# logger.info(es_index_name)
# Elasticsearch connection; the URL (including credentials) comes from the environment.
# 'ELASTIC_URL' is an assumed config key -- do not embed credentials in source.
es = Elasticsearch(config('ELASTIC_URL'), request_timeout=300, retry_on_timeout=True)

def check_elastic_status():
    # 'ELASTIC_USER' / 'ELASTIC_PASSWORD' are assumed config keys for the basic-auth pair
    basic = HTTPBasicAuth(config('ELASTIC_USER'), config('ELASTIC_PASSWORD'))
    response = requests.get('https://scic-elasticsearch.es.us-central1.gcp.cloud.es.io', auth=basic)
    return response.status_code == 200

# Source indices searched by the retriever; factored into a constant because the
# same list is reused when the store is rebuilt in doccompare().
ES_SOURCE_INDICES = [
    'docx_new_page', 'pdf_new_page', 'additional_commercial_forms', 'docx_pdf_page',
    'botcoach_index', 'html_unstructured1', 'excel_json_ind', 'pdf_json_ind',
    'iso_forms_pdf_full_pdf', 'iso_forms_pdf_pagewise', 'json_others',
]

try:
    db = ElasticsearchStore(
        es_connection=es,
        index_name=ES_SOURCE_INDICES,
        embedding=embeddings,
        strategy=ElasticsearchStore.ExactRetrievalStrategy()
    )
except Exception as ex:
    logger.exception('Exception occurred due to %s' % ex)
    if check_elastic_status():
        db = ElasticsearchStore(
            es_connection=es,
            index_name=ES_SOURCE_INDICES,
            embedding=embeddings,
            strategy=ElasticsearchStore.ExactRetrievalStrategy()
        )
        
def get_chat_id_names_by_session_id(index_name, session_id):
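    """Return the chat_id_name stored for the given session, or '' if none exists.

    When several documents carry the field, the value from the last hit wins.
    """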
    query = {
        "size": 5000,
        "query": {"term": {"session_id.keyword": session_id}}  # Match the exact session ID
    }

    
    response = es.search(index=index_name, body=query)
    
    chat_id_name=''
    for hit in response['hits']['hits']:
        source = hit['_source']
        if 'chat_id_name' in source:
            chat_id_name = source.get('chat_id_name')
       
    return chat_id_name


def add_user_message_to_es(customer, username, session_id, user_prompt, index, user_token, modelname, is_safe, flagged_categories):
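    """Index the user's message for this turn and return the new document _id.

    The prompt is stored both as plain text (user_prompt) and as a serialized
    LangChain HumanMessage dict (history), along with its token count and the
    moderation verdict.
    """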
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(HumanMessage(content=user_prompt)),
            ensure_ascii=True,
        ),
        "human token" : user_token,
        "is_safe":is_safe,
        "flagged_categories":flagged_categories,
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=index, body=doc)
    document_id = res.get('_id')
    return document_id
    # return res

def add_chat_message_to_es(customer,username, session_id, user_prompt,index,user_token,modelname,is_safe, flagged_categories):
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(ChatMessage(content=user_prompt, role="user")),
            ensure_ascii=True,
        ),
        "human token" : user_token,
        "is_safe":is_safe,
        "flagged_categories":flagged_categories,
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=index, body=doc)
    return res

def add_assistant_message_to_es(customer,username, session_id, answer,assistant_token,modelname):
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(AIMessage(content=answer)),
            ensure_ascii=True,
        ),
        "assistant token" : assistant_token,
        "feedback" : "null",
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res

def add_system_message_to_es(customer,username, session_id, answer,assistant_token,modelname):
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(SystemMessage(content=answer)),
            ensure_ascii=True,
        ),
        "assistant token" : assistant_token,
        "feedback" : "null",
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res

def search_by_session_id(index_name, session_id):
    # Define the query
    query = {
        "size": 1000,
        "query": {
            "match": {
                "session_id": session_id
            }
        },
        "sort": [
            {
                "lastmodified_at": {
                    "order": "asc"
                }
            }
        ]
    }

    # Perform the search
    response = es.search(index=index_name, body=query)

    return response['hits']['hits']

def document_to_dict(doc):
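    """Normalize a retrieved document (object or dict) into a plain dict.

    The metadata 'source' is reduced to a bare file name so absolute paths do
    not leak into the stored history.
    """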
    # Convert Document to a string representation if storing as text
    if hasattr(doc, 'metadata') and 'source' in doc.metadata:
        # Extract the file name from the source path
        file_name = os.path.basename(doc.metadata['source'])
        # Update the metadata with the new 'source' containing only the file name
        doc.metadata['source'] = file_name
        
    if isinstance(doc, dict):
        # If it is a dictionary, access keys
        return {
            'page_content': doc.get('page_content', ''),
            'metadata': doc.get('metadata', ''),
        }
    else:
        # Return page content and updated metadata as a JSON string
        return {
            'page_content': doc.page_content,
            'metadata': json.dumps(doc.metadata) if hasattr(doc, 'metadata') else '',
        }

def add_document_ai_to_es(username, session_id, docs1, es_index_name,orderid,file):
    # Convert docs1 to a list of dictionaries as strings
    documents_dicts = [document_to_dict(doc) for doc in docs1]
    
    # Create a document with the required fields
    doc = {
        'username': username,
        'session_id': session_id,
        'user_prompt': orderid,  # Store as a JSON string
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(documents_dicts, ensure_ascii=True),
        "filename":file,
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res


def update_lastmodified_at(index_name, doc_id):
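    """Set a document's lastmodified_at field to the current time in epoch milliseconds."""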
    # Get current timestamp in milliseconds
    current_timestamp = int(datetime.now().timestamp() * 1000)
    
    # Define the update body
    update_body = {
        "doc": {
            "lastmodified_at": current_timestamp  # Update with timestamp in milliseconds
        }
    }

    # Perform the update
    es.update(index=index_name, id=doc_id, body=update_body)
    
def update_create_at_with_sessionid(index_name,session_id):
    documents = search_by_session_id(index_name, session_id)
    # Update 'lastmodified_at' field for each matched document
    for doc in documents:
        doc_id = doc['_id']  # Get the document ID
        update_lastmodified_at(index_name, doc_id)

        

def resize_image(image_path, size=(224, 224)):
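    """Downscale an image to fit within `size` and return the result as BMP bytes."""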
    with Image.open(image_path) as img:
        img.thumbnail(size, Image.LANCZOS)
        buffer = BytesIO()
        img.save(buffer, format="BMP")
        return buffer.getvalue()

def encode_image(image_data):
    return base64.b64encode(image_data).decode("utf-8")

def get_current_iso_date():
    return datetime.utcnow().isoformat()

def store_chat_history(es, index_name, chat_entry):
    es.index(index=index_name, document=chat_entry)

def retrieve_chat_data(indexname,session_id):
    # Fetch data from Elasticsearch
    response = es.get(index=indexname, id=session_id)
    
    if response["found"]:
        chat_data = response["_source"]
        return chat_data["documents"], chat_data["chat_history"]
    else:
        return None, None



def chat_hist_img(base64_image,user_id,message,response,session_id,index_name_img,es):
    # Store the initial chat history
    chat_entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        #"timestamp": get_current_iso_date(),  # Ensure this is a string, not a function
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response)
        ],
        "img": base64_image
    }
    
    store_chat_history(es, index_name_img, chat_entry)
  
  
def img_message_to_dict(message):
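    """Serialize a Human/AI message to a small dict, flattening multi-part
    content (text + image parts) down to its text segments only."""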
    if isinstance(message, HumanMessage):
        content = message.content
        if isinstance(content, list):
            content = " ".join([part['text'] for part in content if part['type'] == 'text'])
        return {"type": "human", "data": {"content": content}}
    elif isinstance(message, AIMessage):
        content = message.content
        if isinstance(content, list):
            content = " ".join([part['text'] for part in content if part['type'] == 'text'])
        return {"type": "ai", "data": {"content": content}}
    else:
        raise ValueError(f"Unexpected message type: {type(message)}")



def img_message_from_dict(message_dict):
    # Check if the input is a list of messages
    if isinstance(message_dict, list):
        return [img_message_from_dict(m) for m in message_dict]
    
    message_type = message_dict.get("type")
    if message_type == "human":
        return HumanMessage(content=message_dict["data"]["content"])
    elif message_type == "ai":
        return AIMessage(content=message_dict["data"]["content"])
    else:
        raise ValueError(f"Got unexpected message type: {message_type}")    


def retrieve_chat_history(es, index_name, session_id):
    res = es.search(index=index_name, body={
        "query": {
            "match": {"session_id": session_id}
        },
        "size": 5000
    })
    
    return res['hits']['hits']

def chat_hist_txt(user_id,session_id,message,response,es,index_name):
    # Store the initial chat history
    chat_entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        # "timestamp": get_current_iso_date(),  # Ensure this is a string, not a function
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response)
        ],
    }
    
    store_chat_history(es, index_name, chat_entry)  
    
def encode_image1(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
    
def avi_to_base64(file):
    with open(file, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
    
def resize_and_convert_image(image_path):
    with Image.open(image_path) as img:
        # Resize the image if it exceeds a certain size (e.g., width or height > 1024)
        max_size = (1024, 1024)
        img.thumbnail(max_size, Image.LANCZOS)

        # Convert BMP images to JPEG
        if img.format == 'BMP':
            img = img.convert('RGB')  # Convert to RGB first
            converted_path = image_path.replace('.bmp', '.jpeg')
            img.save(converted_path, format='JPEG')
            os.remove(image_path)
            return converted_path
        else:
            # Save the resized image back to the same path in its original format
            img.save(image_path)
            return image_path
        
def add_documents(file_name1):
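    """Load a file with the loader that matches its extension.

    Returns (1, docs) on success and (0, '') when loading fails or the
    extension is unsupported.
    """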
    _, extension_file_name1 = os.path.splitext(file_name1)
    resp=1
    docs1=''
    # Load the first file
    try:
        if extension_file_name1.lower() == ".xlsx":
            docs1 = document_loader.excel_file_loader(file_name1)
        elif extension_file_name1.lower() == ".csv":
            docs1 = document_loader.csv_file_loader(file_name1)
        elif extension_file_name1.lower() == ".tsv":
            docs1 = document_loader.tsv_file_loader(file_name1)
        elif extension_file_name1.lower() in [".doc", ".docx"]:
            docs1 = document_loader.doc_docx_file_loader(file_name1)
        elif extension_file_name1.lower() == ".pdf":
            docs1 = document_loader.pdf_file_loader(file_name1)
        elif extension_file_name1.lower() == ".pptx":
            docs1 = document_loader.pptx_file_loader(file_name1)
        elif extension_file_name1.lower() == ".xml":
            docs1 = document_loader.xml_file_loader(file_name1)
        elif extension_file_name1.lower() == ".json":
            docs1 = document_loader.json_file_loader(file_name1)
        elif extension_file_name1.lower() == ".txt":
            docs1 = document_loader.text_file_loader(file_name1)
        elif extension_file_name1.lower() == ".md":
            docs1 = document_loader.markdown_file_loader(file_name1)
        elif extension_file_name1.lower() == ".html":
            docs1 = document_loader.html_file_loader(file_name1)
        else:
            raise ValueError(f"Unsupported file type: {extension_file_name1}")
    except Exception as e:
        logger.exception('Failed to load %s: %s', file_name1, e)
        resp = 0

    return resp,docs1

def encode_file(file_path):
    with open(file_path, "rb") as file:
        return base64.b64encode(file.read()).decode("utf-8")
   
def image_upload(file_path,sessionid,username,indexname,candidate):
    filename=file_path.split('/')[-1]
    message = HumanMessage(
        content=[
            {"type": "text", "text": f"Extracted text in the {filename} image"},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{encode_file(file_path)}"},
            },
        ],
    )
    logger.info('image_upload: built multimodal message')
    image_conv = message_to_dict_img(message)
    logger.info('image_upload: converted message to dict')
    index_chat_message(sessionid, username, [image_conv], indexname, candidate)
    logger.info('image_upload: indexed chat message')
    return HumanMessage(
        content=[
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{encode_file(file_path)}"},
            },
        ],
    )



def index_chat_message(sessionid, username, new_content, indexname, candidate):
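    """Append new message content to a session document in Elasticsearch,
    creating the document (upsert) when it does not exist yet."""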
    # Retrieve the existing document
    try:
        doc = es.get(index=indexname, id=sessionid)
        existing_messages = doc['_source']['messages']
    except elasticsearch.NotFoundError:
        # Document does not exist, so start with an empty list
        existing_messages = []
    # Append the new content to the existing messages
    existing_messages.append({
        "data": {
            "content": new_content
        }
    })

    # Update the document with the appended messages
    es.update(
        index=indexname,
        id=sessionid,
        body={
            "doc": {  # Update the document fields
                "session_id": sessionid,
                "username": username,
                "created_at": round(time() * 1000),
                "lastmodified_at": round(time() * 1000),
                "messages": existing_messages,
                'candidate_name':candidate,
            },
            "doc_as_upsert": True  # If the document doesn't exist, create it
        }
    )
    

def message_to_dict_list(hist):
    hist_list=[]
    for history in hist:
        if history.type == 'human':
            hist_list.append({'type': 'human', 'data':history.content})
        elif history.type == 'ai':
            ai_hist = history.content
            ai_hist_content = [{'type': 'text', 'text':f"{ai_hist}"}]
            hist_list.append({'type': 'ai', 'data':ai_hist_content}) 
        elif history.type == 'system':
            hist_list.append({'type': 'system', 'data':history.content})
    return hist_list       

def message_to_dict_img(hist):
    if hist.type == 'human':
        return {'type': 'human', 'data': hist.content}
    elif hist.type == 'ai':
        ai_hist_content = [{'type': 'text', 'text': f"{hist.content}"}]
        return {'type': 'ai', 'data': ai_hist_content}
    elif hist.type == 'system':
        return {'type': 'system', 'data': hist.content}
            


def message_from_dict_list(his_list):
    con_list = []
    for i in his_list:
        if isinstance(i, dict):  # Ensure the item is a dictionary
            if i.get('type') == 'human':
                human_content = HumanMessage(i.get('data'))
                con_list.append(human_content)
            elif i.get('type') == 'ai':
                ai_data = i.get('data', [])
                if ai_data and isinstance(ai_data, list):
                    ai_con = AIMessage(ai_data[0].get('text', ''))
                    con_list.append(ai_con)
        elif isinstance(i, list):  # The item may itself be a list of message dicts
           for lt in i:
                if lt.get('type') == 'human':
                   human_content = HumanMessage(lt.get('data'))
                   con_list.append(human_content)
                elif lt.get('type') == 'ai':
                    ai_content = AIMessage(lt.get('data'))
                    con_list.append(ai_content)
                elif lt.get('type') == 'system':
                    sys_content = SystemMessage(lt.get('data'))
                    con_list.append(sys_content)

    return con_list 

def retrieve_chat_content(sessionid, indexname):
    # Query Elasticsearch to retrieve documents matching chat_id and user_id
    response = es.search(
        index=indexname,
        body={
            "query": {
                "bool": {
                    "must": [
                        {"match": {"session_id": sessionid}},
                    ]
                }
            }
        }
    )

    contents = []
    # Check if any documents are found
    if response['hits']['total']['value'] > 0:
        for hit in response['hits']['hits']:
            document = hit['_source']
            for message in document['messages']:
                content = message['data']['content']
                contents.append(content)

    return contents if contents else None
    


def query_and_combine(past_messages, user_query):
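    """Run a similarity search for user_query, append the retrieved text to
    past_messages as an AIMessage, and ask gpt-4o for a combined response."""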
    llm = ChatOpenAI(
            model="gpt-4o",
        )
    # Specify the search type (e.g., 'similarity' or 'exact_match')
    search_type = 'similarity'  # Adjust this based on your requirements
    
    # Use the search method with the search_type argument
    retrieved_data = db.search(user_query, search_type=search_type)

    # Format the retrieved data
    formatted_data = f"{retrieved_data}"
    
    # Combine past messages with the retrieved data
    combined_messages = past_messages + [
        AIMessage(content=formatted_data)
    ]
    
    # Pass the combined data to ChatOpenAI
    response = llm.invoke(combined_messages)
    return response

def delete_document_content(index_name, session_id, condition_check):
    # Delete documents with the given session_id whose user_prompt matches condition_check
    query = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"session_id": session_id}},
                    {"match": {"user_prompt": condition_check}}
                ]
            }
        }
    }

    # Perform the delete_by_query operation
    delete_response = es.delete_by_query(index=index_name, body=query)
    
    # Check if any documents were deleted
    deleted_count = delete_response['deleted']
    
    if deleted_count > 0:
        print(f"Successfully deleted {deleted_count} documents with session_id '{session_id}' and user_prompt '{condition_check}'.")
    else:
        print(f"No documents found with session_id '{session_id}' and user_prompt '{condition_check}'.")

def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = len(encoding.encode(string))
    return num_tokens

def dict_to_tuple(d):
    """Convert dictionary to a tuple that can be hashed."""
    if isinstance(d, dict):
        return tuple((k, dict_to_tuple(v)) for k, v in sorted(d.items()))
    elif isinstance(d, list):
        return tuple(dict_to_tuple(i) for i in d)
    else:
        return d
    
def fix_syntax_error(text):
    # Replace all double quotes with escaped double quotes
    fixed_text = text.replace('"', '\\"')
    return fixed_text


def user_org_tokens(org_name, username, input_tokens, output_tokens, mailid):
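    """Accumulate a user's token usage within an organization.

    Updates the matching user document when one exists; when the organization
    exists but the user does not, creates a new document with a default
    assigned_tokens budget of 500. Returns the affected document's _id, or
    None when the organization itself is unknown.
    """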
    # Search for documents where organization_name matches
    corp_orgid=None
    user_tokens = es.search(
        index=user_details_index,
        body={
            'query': {
                'bool': {
                    'must': [
                        {'match': {'orgname': org_name}}
                    ],
                    'should': [
                        {'match': {'username': username}},
                        {'bool': {'must_not': {'exists': {'field': 'username'}}}}
                    ]
                }
            }
        }
    )['hits']['hits']

    # Check if any document was found for the organization
    if user_tokens:
        username_found = False
        for doc in user_tokens:
            doc_id = doc['_id']
            doc_source = doc['_source']

            # Check if username matches the current document
            if doc_source.get('username') == username:
                username_found = True

                # Retrieve existing input/output tokens with default values
                inp = doc_source.get('input_tokens', 0)
                outp = doc_source.get('output_tokens', 0)

                # Update the token values
                input_overall_tokens = input_tokens + inp
                output_overall_tokens = output_tokens + outp    
                overall_tokens = input_overall_tokens + output_overall_tokens
                
                logger.info('input_overall_tokens %s'%input_overall_tokens)
                logger.info('output_overall_tokens %s'%output_overall_tokens)
                logger.info('overall_tokens %s'%overall_tokens)
                
                # Prepare the updated document
                updated_doc = {
                    'doc': {
                        'username': username,
                        'mail':mailid,
                        'input_tokens': input_overall_tokens,
                        'output_tokens': output_overall_tokens,
                        'overall_tokens': overall_tokens,
                        'orgname': org_name,
                        'limits_token':1

                    }
                }

                # Update the document in Elasticsearch
                es.update(index=user_details_index, id=doc_id, body=updated_doc)
                print(f"Document updated for username: {username}")
                corp_orgid=doc_id
                break

        # If no document with the username is found but organization exists, create a new document
        if not username_found:
            new_doc = {
                'username': username,
                'mail':mailid,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'overall_tokens': input_tokens + output_tokens,
                'orgname': org_name,
                'limits_token':1,
                'assigned_tokens':500
            }
            result=es.index(index=user_details_index, body=new_doc)
            print(f"New document created for username: {username}")
            corp_orgid = result['_id']
        return corp_orgid

    else:
        logger.info(f"No document found for organization: {org_name}")
        return corp_orgid

        
        
def org_token(input_tokens, output_tokens, org_name):
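    """Add this request's token counts to the organization's running totals and
    recompute the percentage of the assigned budget that has been consumed."""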
    # Search for the document with the given organization name
    org_toks = es.search(
        index=org_details_index,
        body={
            'query': {
                'match': {
                    'organization_name': org_name
                }
            }
        }
    )['hits']['hits']
    if org_toks:
        for doc in org_toks:
            doc_id = doc['_id']
            doc_source = doc['_source']
        # Retrieve existing input/output tokens with default values
        inp = doc_source.get('input_overall_tokens', 0)
        outp = doc_source.get('output_overall_tokens', 0)
        assigned_tokens = doc_source.get("assigned_tokens", 0)

 
        input_overall_tokens = input_tokens + inp
        output_overall_tokens = output_tokens + outp
        overall_tokens = input_overall_tokens + output_overall_tokens
        # Guard against division by zero when no token budget has been assigned
        percentage = floor((overall_tokens / assigned_tokens) * 100) if assigned_tokens else 0
 
 
        # Document to be updated
        update_doc = {
            'doc': {
                'output_overall_tokens': output_overall_tokens,
                'input_overall_tokens': input_overall_tokens,
                'overall_tokens': overall_tokens,
                'organization_name': org_name,
                'percenatge': percentage,  # (sic) misspelled field name kept to match the existing index mapping
            }
        }

 
        # Update the document using its ID
        res = es.update(index=org_details_index, id=doc_id, body=update_doc)
        

def moderate_input(text):
    """
    Check user input using the Moderation API.
    Returns a tuple (is_safe, flagged_categories).
    """
    try:

        client = OpenAI()
 
        response = client.moderations.create(
            model="omni-moderation-latest",
            input=text,
        )
        a=response.to_dict()
        flagged = a["results"][0]["flagged"]
        categories = a["results"][0]["categories"]
        return not flagged, categories if flagged else {}
    except Exception as e:
        logger.exception("Moderation API failed: %s", e)
        return True, {}  # Assume the input is safe if the API call fails

@csrf_exempt
def allibot(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            # request.session.clear()
            if action_type == 'action1':
                username =request.POST.get('username')
                session_id=request.POST.get('session_id')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')
                mailid=request.session.get('email')

                user_prompt = request.POST.get('message')
                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4_0125_preview = num_tokens_from_string(user_prompt, 'gpt-4-0125-preview') 
                is_safe, flagged_categories = moderate_input(user_prompt)
                document_id_user=add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4_0125_preview,'gpt-4-0125-preview',is_safe, flagged_categories)

                if is_safe:                
                    template = """Answer the question in your own words as truthfully as possible from the context given to you. If there is any MCQ question explain why the choice is correct.
                    If you do not know the answer to the question, simply respond with "I don't know. Can you ask another question".
                    Response must be in and around 200 words. It must be written in paragraphs and must not exceed 4 paragraphs.
                    Give a line of space between the paragraphs.
                    If questions are asked where there is no relevant context available, simply respond with "That's a great question! I'm not sure of the answer right now.  Can you ask your question a different way? I am excited to assist you further!"
                    Context: {context}
            
            
                    {chat_history}
                    Human: {question}
                    Assistant:"""
    
                    prompt = PromptTemplate(input_variables=["context", "chat_history", "question"], template=template)
                
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
                    
                    sleep(1)                            
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": session_id}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        items = []
        
                    session_chat_hist = messages_from_dict(items)
                    if session_chat_hist:
                        for msg in session_chat_hist:
                            if isinstance(msg, HumanMessage):
                                memory.chat_memory.add_user_message(msg.content)
                            elif isinstance(msg, AIMessage):
                                memory.chat_memory.add_ai_message(msg.content)
                                
                    #gpt-4o
                    #gpt-4-0613
                    
                    qa = ConversationalRetrievalChain.from_llm(
                        llm=ChatOpenAI(model="gpt-4-0125-preview"),retriever=db.as_retriever(search_kwargs={"k": 4}), memory=memory,
                        combine_docs_chain_kwargs={'prompt': prompt},return_source_documents=True)
            
                    formatted_response = qa(user_prompt)
                    answer=formatted_response['answer']
                    output_token_cost_gpt4_0125_preview = num_tokens_from_string(answer, 'gpt-4-0125-preview')
                    
                    query = {
                        "query": {
                            "bool": {
                                "must": [
                                    # {"term": {"user_id.keyword": alliance_number}},
                                    {"term": {"username": candidate}},
                                ]
                            }
                        }
                    }
    
                    # Search the index for matching records
                    response = es.search(index=user_details_index, body=query)
                    # print('response',response)
                    # Variable to hold the conversation activation state
                    # activate_conv = True
                    assigned_tokens=0
                    overall_tokens=0
                    # Process the results
                    for hit in response["hits"]["hits"]:
                        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                        overall_tokens = hit["_source"].get("overall_tokens", 0)
    
                        
                    logger.info('assigned_tokens %s, output tokens %s', assigned_tokens, output_token_cost_gpt4_0125_preview)

                    # When the user has a token budget and this turn would exceed it,
                    # flag the account and stop; otherwise fall through to the normal answer path.
                    if (assigned_tokens != 0 and overall_tokens != 0 and
                            assigned_tokens - (overall_tokens + output_token_cost_gpt4_0125_preview + input_token_cost_gpt4_0125_preview) < 0):
                        logger.info('Token limit reached for %s', candidate)
                        response = es.search(index=user_details_index, body=query)

                        if response['hits']['total']['value'] > 0:
                            userdoc_id = response['hits']['hits'][0]['_id']
                            # Script update sets 'limits_token', creating the field if it is missing
                            es.update(
                                index=user_details_index,
                                id=userdoc_id,
                                body={
                                    "script": {
                                        "source": "ctx._source.limits_token = params.limits_token;",
                                        "params": {"limits_token": 0}
                                    }
                                }
                            )

                        return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})

                    # Normal path: persist the answer, update the token accounting, and kick off
                    # the background answer-relevancy scorer before responding.
                    add_assistant_message_to_es(candidate, username, session_id, answer, output_token_cost_gpt4_0125_preview, 'gpt-4-0125-preview')
                    org_token(input_token_cost_gpt4_0125_preview, output_token_cost_gpt4_0125_preview, org_name)
                    org_user_docid = user_org_tokens(org_name, candidate, input_token_cost_gpt4_0125_preview, output_token_cost_gpt4_0125_preview, mailid)

                    source = formatted_response['source_documents']
                    meta_data = [sor.metadata for sor in source]
                    unique_entries_mod_str = unique_sources_with_pages(meta_data)
                    try:
                        json_str = dumps(''.join([doc._lc_kwargs['page_content'] + '_lc_kwargs' for doc in formatted_response['source_documents']]))
                        user_ques = dumps(user_prompt)
                        answer_prompt = dumps(answer)
                        unique_entries_str = dumps(unique_entries_mod_str)
                        cmd = f"python3 /var/www/html/AlliBotV5_chatbot/AlliBotV50/Quesgen_answer_relevancy.py {user_ques} {answer_prompt} {json_str} {unique_entries_str} {document_id_user} {org_user_docid}"
                        os.popen(cmd)
                    except Exception as e:
                        logger.exception('Exception occurred due to %s' % e)

                    return JsonResponse({'response': answer, 'session_id': session_id})
                else:
                    return JsonResponse({
                        'response': "The question you asked violates AlliBot usage policies. Repeated violations may lead to your AlliBot account being blocked.",
                        'session_id': session_id,
                        'flagged_categories': flagged_categories
                    })
            

    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
    
@csrf_exempt
def doccompare(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            if action_type == 'action1':          
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')
                mailid=request.session.get('email')

                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o',is_safe, flagged_categories)
                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you.
                    You are a helpful assistant. Include the filename within your response where relevant.
                    Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                    Give a line of space between the paragraphs.
    
                    If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.
    
                    If questions are asked where there is no relevant context available, simply respond with:
                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
    
                    Context: {context}  
    
                    {chat_history}  
                    Human: {question}  
                    Assistant:"""
                    
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
    
                    sleep(2)
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                        size=1000,
                    )
    
                    if result and len(result["hits"]["hits"]) > 0:
                        items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        items = []

                    session_chat_hist = messages_from_dict(items)
                    if session_chat_hist:
                        for msg in session_chat_hist:
                            if isinstance(msg, HumanMessage):
                                memory.chat_memory.add_user_message(msg.content)
                            elif isinstance(msg, AIMessage):
                                memory.chat_memory.add_ai_message(msg.content)
                                
                               
                    result = es.search(
                        index=doc_index,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                    )
                    
                    if result and len(result["hits"]["hits"]) > 0:
                        doc_items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        doc_items = []   
                        
                    uploaded_document_docs = []
                    if doc_items:
                        for i in doc_items:
                            page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""
        
                            # Ensure metadata is a dictionary
                            metadata = i[0].get('metadata')
                            if not isinstance(metadata, dict):
                                # Handle the case where metadata is not a dict
                                # Example: convert to dict if it's a stringified JSON
                                try:
                                    metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                                except (json.JSONDecodeError, TypeError):
                                    metadata = {}
        
                            uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
                
                    # Brute-force (exact) similarity search over the shared knowledge indices
                    db = ElasticsearchStore(
                        es_connection=es,
                        index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others'],
                        embedding=embeddings,
                        strategy=ElasticsearchStore.ExactRetrievalStrategy()
                    )

                    primary_docs = db.as_retriever(search_kwargs={"k": 2}).get_relevant_documents(user_prompt)
                    primary_docs_text = []

                    for doc in primary_docs:
                        # Keep only the page_content; metadata from the knowledge indices is dropped here
                        primary_docs_text.append(BaseDocument(page_content=doc.page_content))
    
    
                    combined_docs = uploaded_document_docs + primary_docs_text
    
                    client = OpenAI()
    
                    memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
                    chat_history = memory_variables['chat_history']
                    formatted_template = template.format(
                        context=combined_docs, 
                        chat_history=chat_history,
                        question=user_prompt
                    )
    
    
                    # Request the completion in one shot (no streaming)
                    response = client.chat.completions.create(
                        model="gpt-4o",
                        messages=[{"role": "user", "content": formatted_template}],
                        stream=False,
                    )
    
                    assistant_response = response.choices[0].message.content
                    output_token_cost_gpt4o = num_tokens_from_string(assistant_response, 'gpt-4o')
                    
                    query = {
                        "query": {
                            "bool": {
                                "must": [
                                    # {"term": {"user_id.keyword": alliance_number}},
                                    {"term": {"username": candidate}},
                                ]
                            }
                        }
                    }
    
                    # Search the index for matching records
                    response = es.search(index=user_details_index, body=query)

                    assigned_tokens = 0
                    overall_tokens = 0
                    # Pull the user's token allocation and running usage from their record
                    for hit in response["hits"]["hits"]:
                        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                        overall_tokens = hit["_source"].get("overall_tokens", 0)

                    # If this exchange would push usage past the assigned budget, zero out the
                    # 'limits_token' field on the user's record and refuse the request
                    if assigned_tokens - (overall_tokens + output_token_cost_gpt4o + input_token_cost_gpt4o) < 0:
                        logger.info('Token limit reached for %s (index %s)' % (candidate, user_details_index))

                        if response['hits']['total']['value'] > 0:
                            userdoc_id = response['hits']['hits'][0]['_id']
                            # A plain Painless assignment creates the field when missing and
                            # updates it otherwise, so one statement covers both cases
                            es.update(
                                index=user_details_index,
                                id=userdoc_id,
                                body={
                                    "script": {
                                        "source": "ctx._source.limits_token = params.limits_token;",
                                        "params": {"limits_token": 0},
                                    }
                                },
                            )

                        return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})
                    else:
                        add_assistant_message_to_es(candidate,username,session_id, assistant_response,output_token_cost_gpt4o,'gpt-4o')
                        org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
                        user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)                            
                        return JsonResponse({'response': assistant_response,'session_id':session_id})
                else:
                    return JsonResponse({
                        'response': "The question you asked violates AlliBot usage policies. Repeated violations may result in your AlliBot account being blocked.",
                        'session_id': session_id,
                        'flagged_categories': flagged_categories
                    })
                
            elif action_type == 'action2':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'upload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                # ind_response=start_indexing(upload_dir,imtext_index,conver_id,username)
                delete_document_content(doc_index,conver_id,'upload1')

                ind_response1, docs1 = add_documents(files_dir[0])  # only the first saved file is indexed
                file = os.path.basename(files_dir[0])
                if docs1:
                    for doc in docs1:
                        doc.page_content = {
                            f"context of the first uploaded {file} document": doc.page_content
                        }
                    add_document_ai_to_es( username, conver_id, docs1,doc_index,'upload1',f"{file}")
                for f in files_dir:
                    os.remove(f)
                    
                if ind_response1 == 1:
                    res_msg = str(len(files)) + ' uploads complete'
                    return JsonResponse({'status': 'success', 'message': res_msg})
                else:
                    res_msg = 'Unable to process upload1 documents.'
                    return JsonResponse({'status': 'failed', 'message': res_msg})
            
            elif action_type == 'action3':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'upload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                
                delete_document_content(doc_index,conver_id,'upload2')
                files_dir = glob(upload_dir+'/*')
                ind_response2, docs2 = add_documents(files_dir[0])  # only the first saved file is indexed
                file = os.path.basename(files_dir[0])
                if docs2:
                    for doc in docs2:
                        doc.page_content = {
                            f"context of the second uploaded {file} document": doc.page_content
                        }
                    add_document_ai_to_es( username, conver_id, docs2,doc_index,'upload2',f"{file}")
                
                for f in files_dir:
                    os.remove(f)
                if ind_response2 == 1:
                    res_msg = str(len(files)) + ' uploads complete'
                    return JsonResponse({'status': 'success', 'message': res_msg})
                else:
                    res_msg = 'Unable to process upload2 documents.'
                    return JsonResponse({'status': 'failed', 'message': res_msg})

    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
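
# The token-budget check above is repeated nearly verbatim in imgtotext and
# formscompare below. A shared helper along these lines could consolidate it --
# a sketch only: the name is hypothetical, it assumes the module-level `es` and
# `user_details_index` used throughout this file, and nothing calls it yet.
def _token_budget_exceeded(candidate, input_tokens, output_tokens):
    """Return True, zeroing the user's 'limits_token' field, when the assigned
    token budget cannot cover this exchange; otherwise return False."""
    query = {"query": {"bool": {"must": [{"term": {"username": candidate}}]}}}
    response = es.search(index=user_details_index, body=query)
    assigned_tokens = overall_tokens = 0
    for hit in response["hits"]["hits"]:
        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
        overall_tokens = hit["_source"].get("overall_tokens", 0)
    if assigned_tokens - (overall_tokens + input_tokens + output_tokens) >= 0:
        return False
    if response['hits']['total']['value'] > 0:
        # A plain Painless assignment upserts the field in one statement
        es.update(
            index=user_details_index,
            id=response['hits']['hits'][0]['_id'],
            body={"script": {
                "source": "ctx._source.limits_token = params.limits_token;",
                "params": {"limits_token": 0},
            }},
        )
    return True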
    
@csrf_exempt
def imgtotext(request):
    try:
        if request.method == 'POST':
            action_type = request.POST.get('operational_type')
            mailid=request.session.get('email')
            # global session_id
            llm = ChatOpenAI(
                model="gpt-4o",
            )
            # request.session.clear()
            upload_dir = os.path.join(os.path.dirname(__file__), 'image_pth')
            os.makedirs(upload_dir, exist_ok=True)
            if action_type == 'action1':

                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                files = request.POST.getlist('file_names[]')
                candidate = request.POST.get('candidate_name')
                org_name = request.POST.get('organization_name')


                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o',is_safe, flagged_categories)
                if is_safe:
                    try: 
                        if glob(upload_dir+'/*') and files:
                            a_with_path = [upload_dir+'/'+ filename for filename in files]
                            if a_with_path:
                                ai_type_conv=[]
                                img_only_resp=[]
                                for i in a_with_path:
                                    if i.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
                                        logger.info('indexing uploaded image %s' % i)
                                        encodeimg = image_upload(i, session_id, username, imtext_index, candidate)
                                        response = llm.invoke([encodeimg])
                                        logger.info('image response received: %s' % response)
                                        ai_type_conv.append(message_to_dict_img(response))
                                        img_only_resp.append(response.content)
                                        
                                    else:
                                        ind_response1, docs1 = add_documents(i)
                                        file = os.path.basename(i)
                                        formatted_data = [item.page_content for item in docs1]
                                        # Ensure the text is passed correctly in the content
                                        message = HumanMessage(
                                            content=[
                                                {"type": "text", "text": f"Filename: {file}"},
                                                {"type": "text", "text": f"Here is the text content of the uploaded document {file}:\n" + '\n'.join(formatted_data)}
                                            ]
                                        )
                                        img_doc_cont=message_to_dict_img(message)
                                        index_chat_message(session_id, username, [img_doc_cont],imtext_index,candidate)
                                        static_content="Could you please clarify the specific operations you'd like to perform on the uploaded documents? If you need further assistance or have additional questions, feel free to ask"
                                        ai_type_conv.append(message_to_dict_img(AIMessage(content=static_content)))
                                        img_only_resp.append(static_content)
    
                    except Exception as e:
                        logger.exception('Exception occurred during image upload: %s' % e)
                    
                    # Remove the staged uploads now that they have been processed
                    files_dir = glob(upload_dir+'/*')
                    for f in files_dir:
                        os.remove(f)
                    if user_prompt:
                        sleep(3)  # likely to allow Elasticsearch's near-real-time refresh to index the uploads above
                        ret_chat = retrieve_chat_content(session_id,imtext_index)
                        # Checking if the document with that chat_id and user_id, if not create a new list 
                        if ret_chat:
                            converted_chat = ret_chat
                            converted_chat = message_from_dict_list(converted_chat)
                        else:
                            converted_chat = []
                        
                        # Convert the question to HumanMessage function    
                        user_input = HumanMessage(content=[{"type": "text", "text": user_prompt}])
                        
                        # Combining both retrieved chat history(converted_chat) and user_input
                        search_type = 'similarity'  # Adjust this based on your requirements
                        
                        # Use the search method with the search_type argument
                        retrieved_data = db.search(user_prompt, search_type=search_type)
                        formatted_data = [
                            {"type": "text", "text": item.page_content} 
                            for item in retrieved_data 
                            if item.page_content.strip() != '{"text": " "}'  # Only add if page_content is not just whitespace or empty
                        ]
                        # Wrap the retrieved context as a message for the model
                        ai_message = HumanMessage(content=formatted_data)

                        # Combine past messages, retrieved context (when any), and the new question
                        if formatted_data:
                            all_chat = converted_chat + [ai_message, user_input]
                        else:
                            all_chat = converted_chat + [user_input]
                           
                        # llm = ChatOpenAI(model="gpt-4o")
                        img_response = llm.invoke(all_chat)
                        
                        # Converting retrieved chat history and the recent user question to dictionary form to store in ElasticSearch
                        history = message_to_dict_list([user_input])
                        
                        
                        # Converting the response to dictionary and appending to the history
                        history.append(message_to_dict_img(img_response))
                        
                        # Updating the current conversation to an already existing document 
                        index_chat_message(session_id,username, history,imtext_index,candidate)
                        output_token_cost_gpt4o = num_tokens_from_string(img_response.content, 'gpt-4o')
                        
                        query = {
                            "query": {
                                "bool": {
                                    "must": [
                                        # {"term": {"user_id.keyword": alliance_number}},
                                        {"term": {"username": candidate}},
                                    ]
                                }
                            }
                        }
        
                        # Search the index for matching records
                        response = es.search(index=user_details_index, body=query)

                        assigned_tokens = 0
                        overall_tokens = 0
                        # Pull the user's token allocation and running usage from their record
                        for hit in response["hits"]["hits"]:
                            assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                            overall_tokens = hit["_source"].get("overall_tokens", 0)

                        # If this exchange would push usage past the assigned budget, zero out the
                        # 'limits_token' field on the user's record and refuse the request
                        if assigned_tokens - (overall_tokens + output_token_cost_gpt4o + input_token_cost_gpt4o) < 0:
                            logger.info('Token limit reached for %s (index %s)' % (candidate, user_details_index))

                            if response['hits']['total']['value'] > 0:
                                userdoc_id = response['hits']['hits'][0]['_id']
                                # A plain Painless assignment creates the field when missing and
                                # updates it otherwise, so one statement covers both cases
                                es.update(
                                    index=user_details_index,
                                    id=userdoc_id,
                                    body={
                                        "script": {
                                            "source": "ctx._source.limits_token = params.limits_token;",
                                            "params": {"limits_token": 0},
                                        }
                                    },
                                )

                            return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})
                        else:
                            
                            add_assistant_message_to_es(candidate,username,session_id,img_response.content,output_token_cost_gpt4o,'gpt-4o')
                            org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
                            user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
                            return JsonResponse({'response': img_response.content,'session_id':session_id})
                        
                    else:
                        unique_data = []
                        seen = set()
    
                        for d in ai_type_conv:
                            dict_tuple = dict_to_tuple(d)
                            if dict_tuple not in seen:
                                unique_data.append(d)
                                seen.add(dict_tuple)
    
                        for d in unique_data:
                            index_chat_message(session_id, username, [d], imtext_index, candidate)
                        only_files_response = '\n\n'.join(list(set(img_only_resp)))
                        output_token_cost_gpt4o = num_tokens_from_string(only_files_response, 'gpt-4o')
                        
                        query = {
                            "query": {
                                "bool": {
                                    "must": [
                                        # {"term": {"user_id.keyword": alliance_number}},
                                        {"term": {"username": candidate}},
                                    ]
                                }
                            }
                        }
        
                        # Search the index for matching records
                        response = es.search(index=user_details_index, body=query)

                        assigned_tokens = 0
                        overall_tokens = 0
                        # Pull the user's token allocation and running usage from their record
                        for hit in response["hits"]["hits"]:
                            assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                            overall_tokens = hit["_source"].get("overall_tokens", 0)

                        # If this exchange would push usage past the assigned budget, zero out the
                        # 'limits_token' field on the user's record and refuse the request
                        if assigned_tokens - (overall_tokens + output_token_cost_gpt4o + input_token_cost_gpt4o) < 0:
                            logger.info('Token limit reached for %s (index %s)' % (candidate, user_details_index))

                            if response['hits']['total']['value'] > 0:
                                userdoc_id = response['hits']['hits'][0]['_id']
                                # A plain Painless assignment creates the field when missing and
                                # updates it otherwise, so one statement covers both cases
                                es.update(
                                    index=user_details_index,
                                    id=userdoc_id,
                                    body={
                                        "script": {
                                            "source": "ctx._source.limits_token = params.limits_token;",
                                            "params": {"limits_token": 0},
                                        }
                                    },
                                )

                            return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})
                        else:
                            add_assistant_message_to_es(candidate,username,session_id,only_files_response,output_token_cost_gpt4o,'gpt-4o')
                            org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
                            user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
                            return JsonResponse({'response': only_files_response,'session_id':session_id})
                        
                        
                else:
                    return JsonResponse({
                        'response': "The question you asked violates AlliBot usage policies. Repeated violations may result in your AlliBot account being blocked.",
                        'session_id': session_id,
                        'flagged_categories': flagged_categories
                    })
            
            elif action_type == 'action2':
                files = request.FILES.getlist('files')
                # conver_id=request.POST.get('session_id')
                # user=request.POST.get('username')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)


                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                # ind_response=Image_indexing(upload_dir,'image_datas',conver_id)
                try:
                    # Collect all supported image files (jpg, jpeg, png, bmp) from the upload dir
                    supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                    image_files = []
                    for ext in supported_formats:
                        image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
    
                    # Resize and convert each image if necessary
                    for image_file in image_files:
                        processed_image_path = resize_and_convert_image(image_file)
                    ind_response = 1

                except Exception as e:
                    logger.exception('Exception occurred while processing images: %s' % e)
                    ind_response = 0
                if ind_response == 1:
                    res_msg = str(len(files)) + ' uploads complete'
                    return JsonResponse({'status': 'success', 'message': res_msg})
                else:
                    res_msg = 'Unable to process image.'
                    return JsonResponse({'status': 'failed', 'message': res_msg})

    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
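
# formscompare's two upload actions (below) each rebuild the same base64 data-URL
# message when asking gpt-4o to OCR an image. A helper along these lines could be
# shared between them -- a sketch only: the name is hypothetical, it assumes the
# `encode_image1` and `resize_and_convert_image` helpers defined elsewhere in this
# file, and nothing calls it yet.
def _ocr_message_for_image(image_file):
    """Build the HumanMessage that asks gpt-4o to OCR a resized, base64-encoded image."""
    processed_image_path = resize_and_convert_image(image_file)
    # Determine the MIME type from the extension (BMPs are converted to JPEG upstream)
    if processed_image_path.endswith(('.jpeg', '.jpg')):
        mime_type = 'image/jpeg'
    elif processed_image_path.endswith('.png'):
        mime_type = 'image/png'
    else:
        mime_type = 'image/jpeg'
    image = encode_image1(processed_image_path)
    return HumanMessage(
        content=[
            {"type": "text", "text": "You are a useful bot that is especially good at OCR from images"},
            {"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{image}"}},
        ]
    )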
      
 
def get_resptable(formatted_template):
    """Send a fully formatted prompt to gpt-4o and return the assistant's reply."""
    client = OpenAI()
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": formatted_template}],
        stream=False,
    )
    
    assistant_response = response.choices[0].message.content   
    return assistant_response
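
# Every upload action in doccompare, imgtotext, and formscompare repeats the same
# save-to-disk loop. A small helper like this could factor it out -- a sketch only:
# the name is hypothetical and nothing calls it yet.
def _save_uploaded_files(files, upload_dir):
    """Write each uploaded Django file to upload_dir in chunks; return the saved paths."""
    os.makedirs(upload_dir, exist_ok=True)
    saved_paths = []
    for file in files:
        file_path = os.path.join(upload_dir, file.name)
        with open(file_path, 'wb+') as destination:
            for chunk in file.chunks():
                destination.write(chunk)
        saved_paths.append(file_path)
    return saved_paths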
    
@csrf_exempt
def formscompare(request):
    try:
        if request.method == 'POST':
            action_type = request.POST.get('operational_type')
            if action_type == 'action1':
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                candidate = request.POST.get('candidate_name')
                org_name = request.POST.get('organization_name')
                mailid = request.session.get('email')



                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o',is_safe, flagged_categories)
                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you.
                    You are a helpful assistant. Include the filename within your response where relevant.
                    Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                    Give a line of space between the paragraphs.

                    If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.

                    If questions are asked where there is no relevant context available, simply respond with:
                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"

                    Context: {context}  

                    {chat_history}  
                    Human: {question}  
                    Assistant:"""
                    
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
    
                    # sleep(2)
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                        size=1000,
                    )
    
                    if result and len(result["hits"]["hits"]) > 0:
                        items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        items = []                
                        
                    session_chat_hist = messages_from_dict(items)
                    for msg in session_chat_hist:
                        # Chat messages are treated as user turns; system messages as AI turns
                        if isinstance(msg, (HumanMessage, ChatMessage)):
                            memory.chat_memory.add_user_message(msg.content)
                        elif isinstance(msg, (AIMessage, SystemMessage)):
                            memory.chat_memory.add_ai_message(msg.content)
                                
                                
                               
                    result = es.search(
                        index=forms_index,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                    )
                    
                    if result and len(result["hits"]["hits"]) > 0:
                        doc_items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        doc_items = []   
                        
                    uploaded_document_docs = []
                    if doc_items:
                        for i in doc_items:
                            page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""
        
                            # Ensure metadata is a dictionary
                            metadata = i[0].get('metadata')
                            if not isinstance(metadata, dict):
                                # Handle the case where metadata is not a dict
                                # Example: convert to dict if it's a stringified JSON
                                try:
                                    metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                                except (json.JSONDecodeError, TypeError):
                                    metadata = {}
        
                            uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
                
                    # Brute-force (exact) similarity search over the shared knowledge indices
                    db = ElasticsearchStore(
                        es_connection=es,
                        index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others'],
                        embedding=embeddings,
                        strategy=ElasticsearchStore.ExactRetrievalStrategy()
                    )

                    primary_docs = db.as_retriever(search_kwargs={"k": 2}).get_relevant_documents(user_prompt)
                    primary_docs_text = []

                    for doc in primary_docs:
                        # Keep only the page_content; metadata from the knowledge indices is dropped here
                        primary_docs_text.append(BaseDocument(page_content=doc.page_content))
    
                    combined_docs = uploaded_document_docs + primary_docs_text

                    client = OpenAI()

                    memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
                    chat_history = memory_variables['chat_history']
                    formatted_template = template.format(
                        context=combined_docs, 
                        chat_history=chat_history,
                        question=user_prompt
                    )
    
    
                    # Request the completion in one shot (no streaming)
                    response = client.chat.completions.create(
                        model="gpt-4o",
                        messages=[{"role": "user", "content": formatted_template}],
                        stream=False,
                    )
    
                    assistant_response = response.choices[0].message.content
                    output_token_cost_gpt4o = num_tokens_from_string(assistant_response, 'gpt-4o')
                    query = {
                        "query": {
                            "bool": {
                                "must": [
                                    # {"term": {"user_id.keyword": alliance_number}},
                                    {"term": {"username": candidate}},
                                ]
                            }
                        }
                    }
    
                    # Search the index for matching records
                    response = es.search(index=user_details_index, body=query)

                    assigned_tokens = 0
                    overall_tokens = 0
                    # Pull the user's token allocation and running usage from their record
                    for hit in response["hits"]["hits"]:
                        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                        overall_tokens = hit["_source"].get("overall_tokens", 0)

                    logger.info('assigned_tokens=%s overall_tokens=%s' % (assigned_tokens, overall_tokens))

                    # If this exchange would push usage past the assigned budget, zero out the
                    # 'limits_token' field on the user's record and refuse the request
                    if assigned_tokens - (overall_tokens + output_token_cost_gpt4o + input_token_cost_gpt4o) < 0:
                        logger.info('Token limit reached for %s (index %s)' % (candidate, user_details_index))

                        if response['hits']['total']['value'] > 0:
                            userdoc_id = response['hits']['hits'][0]['_id']
                            # A plain Painless assignment creates the field when missing and
                            # updates it otherwise, so one statement covers both cases
                            es.update(
                                index=user_details_index,
                                id=userdoc_id,
                                body={
                                    "script": {
                                        "source": "ctx._source.limits_token = params.limits_token;",
                                        "params": {"limits_token": 0},
                                    }
                                },
                            )

                        return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})
                    else:
                        add_assistant_message_to_es(candidate,username,session_id, assistant_response,output_token_cost_gpt4o,'gpt-4o')
                        org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
                        user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
                        return JsonResponse({'response': assistant_response,'session_id':session_id})
                    
                else:
                    return JsonResponse({
                        'response': "The question you asked violates AlliBot usage policies. Repeated violations may result in your AlliBot account being blocked.",
                        'session_id': session_id,
                        'flagged_categories': flagged_categories
                    })
                
            elif action_type == 'action2':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'formsupload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                # ind_response=start_indexing(upload_dir,imtext_index,conver_id,username)
                delete_document_content(forms_index,conver_id,'upload1')
                
                # Collect all supported image files (jpg, jpeg, png, bmp) from the upload dir
                supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                image_files = []
                for ext in supported_formats:
                    image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
                if image_files:
                    chain = ChatOpenAI(openai_api_key=config('SECRET_KEY'), model="gpt-4o", max_tokens=1024)
                    # Iterate through each image file
                    for image_file in image_files:
                        # Resize and convert the image if necessary
                        processed_image_path = resize_and_convert_image(image_file)
                        logger.info('processed image path: %s' % processed_image_path)
                        # Encode the image
                        image = encode_image1(processed_image_path)
    
                        # Determine the image MIME type
                        if processed_image_path.endswith('.jpeg') or processed_image_path.endswith('.jpg'):
                            mime_type = 'image/jpeg'
                        elif processed_image_path.endswith('.png'):
                            mime_type = 'image/png'
                        else:
                            mime_type = 'image/jpeg'  # Default to JPEG if BMP was converted
    
                        # Invoke your chatbot with the message containing the image
                        msg = chain.invoke([
                            AIMessage(
                                content="You are a useful bot that is especially good at OCR from images"
                            ),
                            HumanMessage(
                                content=[
                                    {"type": "text", "text": "You are a useful bot that is especially good at OCR from images"},
                                    {
                                        "type": "image_url",
                                        "image_url": {
                                            "url": f"data:{mime_type};base64,{image}"
                                        },
                                    },
                                ]
                            )
                        ])
                        ind_response1 = 1
                        docs1 = msg.content  # note: only the last image's OCR text is kept
                        logger.info('OCR output: %s' % msg.content)
                else:         
                    ind_response1, docs1 = add_documents(files_dir[0])
                file = os.path.basename(files_dir[0])
                
                if not isinstance(docs1, list):
                    # Wrap it in a list if it's not already
                    docs1 = [docs1]
    
                if docs1:
                    for i, doc in enumerate(docs1):
                        if isinstance(doc, str):  # Handle case where doc is a string
                            docs1[i] = {
                                "page_content": f"context of the first uploaded {file} image: {doc}",
                                "metadata": {"source": files_dir[0]},  # Basic metadata with source
                            }
                        elif hasattr(doc, "page_content"):  # Handle case where doc is an object
                            doc.page_content = {
                                f"context of the first uploaded {file} document": doc.page_content
                            }
                            
                    add_document_ai_to_es(username, conver_id, docs1, forms_index, 'upload1', f"{file}")
                for f in files_dir:
                    os.remove(f)
                    
                if ind_response1 == 1:
                    res_msg = str(len(files)) + ' uploads complete'
                    return JsonResponse({'status': 'success', 'message': res_msg})
                else:
                    res_msg = 'Unable to process upload1 documents.'
                    return JsonResponse({'status': 'failed', 'message': res_msg})
            
            elif action_type == 'action3':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'formsupload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                delete_document_content(forms_index,conver_id,'upload2')
                
                # Collect all supported image files (jpg, jpeg, png, bmp) from the upload dir
                supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                image_files = []
                for ext in supported_formats:
                    image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
                if image_files:
                    chain = ChatOpenAI(openai_api_key=config('SECRET_KEY'), model="gpt-4o", max_tokens=1024)
                    # Iterate through each image file
                    for image_file in image_files:
                        # Resize and convert the image if necessary
                        processed_image_path = resize_and_convert_image(image_file)
                        # Encode the image
                        image = encode_image1(processed_image_path)
    
                        # Determine the image MIME type
                        if processed_image_path.endswith('.jpeg') or processed_image_path.endswith('.jpg'):
                            mime_type = 'image/jpeg'
                        elif processed_image_path.endswith('.png'):
                            mime_type = 'image/png'
                        else:
                            mime_type = 'image/jpeg'  # Default to JPEG if BMP was converted
    
                        # Invoke your chatbot with the message containing the image
                        msg = chain.invoke([
                            AIMessage(
                                content="You are a useful bot that is especially good at OCR from images"
                            ),
                            HumanMessage(
                                content=[
                                    {"type": "text", "text": "You are a useful bot that is especially good at OCR from images"},
                                    {
                                        "type": "image_url",
                                        "image_url": {
                                            "url": f"data:{mime_type};base64,{image}"
                                        },
                                    },
                                ]
                            )
                        ])
                        ind_response2 = 1
                        docs2 = msg.content  # note: only the last image's OCR text is kept
                else:         
                    ind_response2, docs2 = add_documents(files_dir[0])
                file = os.path.basename(files_dir[0])

                if not isinstance(docs2, list):
                    # Wrap it in a list if it's not already
                    docs2 = [docs2]
    
                if docs2:
                    for i, doc in enumerate(docs2):
                        if isinstance(doc, str):  # Handle case where doc is a string
                            docs2[i] = {
                                "page_content": f"context of the second uploaded {file} image: {doc}",
                                "metadata": {"source": files_dir[0]},  # Basic metadata with source
                            }
                        elif hasattr(doc, "page_content"):  # Handle case where doc is an object
                            doc.page_content = {
                                f"context of the second uploaded {file} document": doc.page_content
                            }

                    add_document_ai_to_es(username, conver_id, docs2, forms_index, 'upload2', f"{file}")
                
                for f in files_dir:
                    os.remove(f)
                if ind_response2 == 1:
                    res_msg = str(len(files)) + ' uploads complete'
                    return JsonResponse({'status': 'success', 'message': res_msg})
                else:
                    res_msg = 'Unable to process upload2 documents.'
                    return JsonResponse({'status': 'failed', 'message': res_msg})
            elif action_type == 'action4':
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                # user_prompt = request.POST.get('message')  # action4 uses a fixed extraction prompt instead
                candidate = request.POST.get('candidate_name')
                org_name = request.POST.get('organization_name')
                mailid = request.session.get('email')
                field_list=['Policy Number','Policy Effective Date','Policy Issue Date','Schedule of Forms and Endorsements',
                            'Common Policy Declarations and Conditions','Declarations','Coverage','Endorsements']
                user_prompt = """Extract the following information from each uploaded document and return the output strictly in a table format only, with column names as the document names and row fields as: Policy Number, Policy Effective Date, Policy Issue Date, Schedule of Forms and Endorsements, Common Policy Declarations and Conditions, Declarations, Coverage, and Endorsements.

                **Ensure the following**:  
                1. The Policy Number format must strictly follow the pattern of two alphabetic characters, a space, followed by two digits, a space, repeated thrice (e.g., AG 00 02 30 06). The Policy Number is typically found in the header or footer of the document. If the policy number field appears empty, perform a thorough search across the document, including all sections (header, footer, body text, metadata, etc.), to ensure the policy number is identified and extracted wherever it exists.
                2. If any field is missing or not found in a document, include only the data from the fields that are present. Use as much detail as possible, up to 100 words, in each field that is available. Leave missing fields blank in the table.  

                **For Specific Fields**:  
                - **Schedule of Forms and Endorsements**: Get data based on the list of policy forms, schedules, and endorsements by line of business.  
                - **Declarations**: Include line of coverage-specific declarations (general liability, business auto, business income, contractors equipment, etc.).  
                - **Coverages**: Provide a comparison of coverages offered by the underlying policy (general liability, business auto, business income, contractors equipment, etc.).  
                - **Endorsements**: List the name of each endorsement included in the document with a summary of how each endorsement affects the underlying policy.  

                **Response Format**:  
                - Provide the extracted data in a table format as described above.  
                - Include an "Overall Summary" section directly under the table. This summary must summarise the table data's important observations across the documents, titled as **summary**."""


                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_chat_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o',is_safe, flagged_categories)
                
                
                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you.  
                    You are a helpful assistant. Include the filename within your response where relevant.  
                    Extract the following information from each uploaded document and return the output strictly in a table format only.  

                    **Table Format Requirements**:  
                    - The table must have a header row with "Field Name" in the first column, followed by the uploaded document names as subsequent column headers (e.g., "Uploaded Document 1 Name" and "Uploaded Document 2 Name").  
                    - Row fields should include the **actual values** for the following fields: Policy Number, Policy Effective Date, Policy Issue Date, Schedule of Forms and Endorsements, Common Policy Declarations and Conditions, Declarations, Coverage, and Endorsements.  

                    **Ensure the following**:  
                    1.The Policy Number must follow one of the patterns below:
                    - Two alphabetic characters, a space, followed by two digits, a space, repeated thrice (e.g., AG 00 02 30 06).
                    - Three alphabetic characters, a space, followed by two digits, a space, repeated twice (e.g., CNI 90 22 11 22).
                    - Two alphabetic characters, a space, another two alphabetic characters, a space, followed by two digits, a space, and repeated once (e.g., CG DS 01 10 01).
                    - Two alphabetic characters, a space, one alphabetic character, a space, followed by three digits, a space, then two digits, a space, and two digits (e.g., IL U 002 05 10).
                      The Policy Number is usually found in the header or footer of the document. Ensure to search these sections carefully when extracting the number.
                    2. If any field is missing or not found in a document, include only the data from the fields that are present. Use as much detail as possible, up to 100 words, in each field that is available. Leave missing fields blank in the table.  
                    3. Perform a comparison for each field across all uploaded documents to ensure consistency.

                    **For Specific Fields**:  
                    - **Schedule of Forms and Endorsements**: Get data based on the list of policy forms, schedules, and endorsements by line of business.  
                    - **Declarations**: Include line of coverage-specific declarations (general liability, business auto, business income, contractors equipment).  
                    - **Coverages**: Provide a comparison of coverages offered by the underlying policy (general liability, business auto, business income, contractors equipment).  
                    - **Endorsements**: List the name of each endorsement included in the document with a summary of how each endorsement affects the underlying policy.  

                    **Response Format**:  
                    - Provide the extracted data in a table format as described above.  
                    - Include an "Overall Summary" section directly under the table. This summary must summarise the table data's important observations across the documents, titled as **summary**.  

                    **Additional Note**:  
                    If questions are asked where there is no relevant context available, simply respond with:  
                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"

                    Context: {context}  

                    {chat_history}  
                    Assistant:"""
                    
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
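                    # This memory is created fresh per request and nothing is ever
                    # saved into it, so the chat_history loaded below is empty; the
                    # real context comes from the documents retrieved next.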
         
                               
                    result = es.search(
                        index=forms_index,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                    )
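                    # Each hit's 'history' field holds a JSON-serialized list of
                    # document chunks stored at upload time; they are rebuilt into
                    # Document objects below.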
                    
                    if result and len(result["hits"]["hits"]) > 0:
                        doc_items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        doc_items = []   
                        
                    uploaded_document_docs = []
                    if doc_items:
                        for i in doc_items:
                            page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""

                            # Ensure metadata is a dictionary
                            metadata = i[0].get('metadata')
                            if not isinstance(metadata, dict):
                                # Handle the case where metadata is not a dict
                                # Example: convert to dict if it's a stringified JSON
                                try:
                                    metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                                except (json.JSONDecodeError, TypeError):
                                    metadata = {}

                            uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
                
                    # combined_docs = uploaded_document_docs + primary_docs_text
                    combined_docs = uploaded_document_docs
                    logger.info('combined_docs%s'%combined_docs)


                    memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
                    chat_history = memory_variables['chat_history']
                    formatted_template = template.format(
                        context=combined_docs, 
                        chat_history=chat_history,
                        # question=user_prompt
                    )
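                    # Note: the formatted prompt embeds the retrieved context and
                    # (empty) chat history directly; there is no {question}
                    # placeholder, so the model answers from the template's own
                    # instructions. Up to four attempts are made below until the
                    # response contains the 'Field Name' table header, after which
                    # everything before the header, any code fences, and variant
                    # summary headings are normalized.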
                    
                    for attempt in range(4):
                        logger.info('attempt%s'%attempt)
                        assistant_response=get_resptable(formatted_template)

                        if 'Field Name' in assistant_response:
                            logger.info('assistant_response if cond %s'%assistant_response)
                            assistant_response='Field Name'+assistant_response.split('Field Name')[-1]
                            assistant_response=assistant_response.replace("```",'')
                            if '**summary:**' in assistant_response:
                                assistant_response=assistant_response.replace('**summary:**','**Summary**')
                            elif '**Summary:**' in assistant_response:
                                assistant_response=assistant_response.replace('**Summary:**','**Summary**')
                            elif 'Overall Summary:' in assistant_response:
                                assistant_response=assistant_response.replace('Overall Summary:','**Summary**')
                            for fie in field_list:
                                if '**'+fie+'**' in assistant_response:
                                    assistant_response=assistant_response.replace('**'+fie+'**',fie)
                            break
                    
                    output_token_cost_gpt4o =  num_tokens_from_string(assistant_response,'gpt-4o') 
                    
                    query = {
                        "query": {
                            "bool": {
                                "must": [
                                    # {"term": {"user_id.keyword": alliance_number}},
                                    {"term": {"username": candidate}},
                                ]
                            }
                        }
                    }
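                    # Term query on the raw 'username' field. This assumes the
                    # field is mapped as a keyword; if it is analyzed text, a
                    # 'username.keyword' subfield (as used elsewhere in this
                    # module for session_id) would be needed for exact matches.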
    
                    # Search the index for matching records
                    response = es.search(index=user_details_index, body=query)

                    assigned_tokens=0
                    overall_tokens=0
                    # Process the results
                    for hit in response["hits"]["hits"]:
                        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                        overall_tokens = hit["_source"].get("overall_tokens", 0)
                    
                    logger.info('assigned_tokens %s'%assigned_tokens)
                    logger.info('overall_tokens %s'%overall_tokens)
                    logger.info('output_token_cost_gpt4o %s'%output_token_cost_gpt4o)

                    if assigned_tokens-(overall_tokens+output_token_cost_gpt4o+input_token_cost_gpt4o) <0:
                        print('if satisfied',candidate)
                        print('output_token_cost_gpt4o',output_token_cost_gpt4o)

                        response = es.search(
                        index=user_details_index,
                        body={
                                "query": {
                                    "bool": {
                                        "must": [
                                            # {"term": {"user_id.keyword": alliance_number}},
                                            {"term": {"username": candidate}},
                                        ]
                                    }
                                },
                            }
                        )
                    
                        if response['hits']['total']['value'] > 0:
                            userdoc_id = response['hits']['hits'][0]['_id']
                            print('userdoc_id',userdoc_id)
                            # Use script to either add or update 'limits_token'
                            es.update(
                                index=user_details_index,
                                id=userdoc_id,
                                body={
                                    "script": {
                                        "source": """
                                            if (ctx._source.containsKey('limits_token')) {
                                                // If limits_token exists, update its value
                                                ctx._source.limits_token = params.limits_token;
                                            } else {
                                                // If limits_token doesn't exist, create it and set the value
                                                ctx._source['limits_token'] = params.limits_token;
                                            }
                                        """,
                                        "params": {
                                            "limits_token": 0
                                        }
                                    }
                                }
                            )
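                        # Note: in Painless, assigning ctx._source.limits_token
                        # creates the field when it is missing, so the containsKey
                        # branch above is redundant but harmless.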
    
                        
                        # Search the index for matching records
                        response = es.search(index=user_details_index, body=query)
    
                        return JsonResponse({'response': 'Your token limit has been reached.','session_id':session_id})
                    else:
                        logger.info('output_token_cost_gpt4o to sys es %s'%output_token_cost_gpt4o)
                        add_system_message_to_es(candidate,username,session_id, assistant_response,output_token_cost_gpt4o,'gpt-4o')
                        org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
                        logger.info('output_token_cost_gpt4o org es %s'%output_token_cost_gpt4o)
                        user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
                        return JsonResponse({'response': assistant_response,'session_id':session_id})                    
                    
                else:
                    return JsonResponse({
                        'response': "The Question you asked violates AlliBot usage policies.Don't repeat this from blocking of AlliBot account.",
                        'session_id':session_id,
                        'flagged_categories': flagged_categories
                    })    
                
    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
    


@csrf_exempt
def activate_query_conversation(request):
    if request.method == "POST":
        try:
            # Get data from POST request
            organization_name = request.POST.get("organization_name")
            username = request.POST.get("username")

            # Construct the Elasticsearch query
            query = {
                "query": {
                    "bool": {
                        "must": [
                            # {"term": {"orgname": organization_name}},
                            {"term": {"username": username}},
                        ]
                    }
                }
            }

            # Search the index for matching records
            response = es.search(index=user_details_index, body=query)

            # Variable to hold the conversation activation state
            activate_conv = True
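            # A user stays active unless limits_token is 0 (set elsewhere once
            # the budget is exhausted) or fewer than ~100 tokens remain.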

            # Process the results
            for hit in response["hits"]["hits"]:
                assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                overall_tokens = hit["_source"].get("overall_tokens", 0)
                token_limits=hit["_source"].get("limits_token", 0)
                if token_limits ==0:
                    activate_conv = False
                else:
                    if assigned_tokens and not overall_tokens:
                        if assigned_tokens>100:
                            activate_conv = True
                        else:
                            activate_conv = False
                        # print('satisfied')
    
                    # If both assigned and overall tokens are present
                    elif assigned_tokens and overall_tokens:
                        # Calculate the difference
                        sub_res = assigned_tokens - overall_tokens
    
                        # Deactivate when 100 or fewer tokens remain
                        # (sub_res < 0 is already covered by sub_res <= 100)
                        activate_conv = sub_res > 100
            return JsonResponse({"activate_conv": activate_conv})

        except Exception as e:
            # Log the error and return an error response
            logger.info("check token index %s" % e)
            return JsonResponse({"error": str(e)}, status=500)
    else:
        # Return a method not allowed error for non-POST requests
        return JsonResponse({"error": "Method not allowed"}, status=405)



@csrf_exempt
def update_thumbs_flag(request):
# Search for the document using session ID and answer field
    try:
        # session_id,index_name,answer,
        if request.method == 'POST':
            print('thumbs flag')
            session_id = request.POST.get('session_id')
            index_name =request.POST.get('indexname')
            answer=request.POST.get('answer')
            thumbs_flag=request.POST.get('thumbs_flag')
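            # Brief pause, presumably to let Elasticsearch index the
            # just-written answer before we search for it (the default
            # refresh interval is 1s).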
            sleep(2)
            
            logger.info('index_name%s'%index_name)
            logger.info('session_id%s'%session_id)

            response = es.search(
                index=index_name,
                body={
                    "query": {"term": {"session_id.keyword": session_id}},
                    "size": 1000  # Adjust size to the number of documents you expect, e.g., 1000
                }
            )
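            # Scan the returned documents (up to 1000) and update the feedback
            # flag on each one whose stored 'answer' matches the submitted text.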
            
            # print('response length',len(response))

            # Check if any documents are found
            total_docs = response['hits']['total']['value']
            logger.info('total_docs%s'%total_docs)
            if total_docs > 0:
                for hit in response['hits']['hits']:
                    doc_id = hit['_id']  # Get document ID
                    if 'answer' in hit['_source'] :
                        doc_source = hit['_source']['answer']  # Get the document source
                        if doc_source==answer:
                            # print('yes')
                            es.update(
                                index=index_name,
                                id=doc_id,
                                body={
                                    "doc": {
                                        "feedback": thumbs_flag,  # Update the feedback flag with the value 'good', 'bad', or 'no feedback'
                                    },
                                    "doc_as_upsert": True  # Create the document if it doesn't exist
                                }
                            )
                return JsonResponse({'status': 'success', 'message': 'Document updated'})
    
            else:
                return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)
        else:
            return JsonResponse({'error': 'Invalid request method'}, status=405)

    except NotFoundError:
        logger.info("No documents found for session_id %s and answer '%s'" % (session_id, answer))
        return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)

@csrf_exempt
def compforms_lettergen(request):
    if request.method == 'POST':
        session_id = request.POST.get('session_id')
        candidate =request.POST.get('candidate_name')
        org_name=request.POST.get('organization_name')
        mailid=request.session.get('email')      
        
        user_prompt = """Analyze the information provided from both ISO forms and generate a letter emphasizing the key similarities, differences, and insights. Ensure the letter clearly reflects the comparison, maintaining adherence to the formats specified in the ISO forms. Include the filename(s) of the ISO forms within your response where relevant."""


        update_create_at_with_sessionid(es_index_name, session_id)
        input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
        
        template = """Analyze the information provided from both ISO forms and generate a letter emphasizing the key similarities, differences, and insights. Ensure the letter clearly reflects the comparison, maintaining adherence to the formats specified in the ISO forms. Include the filename(s) of the ISO forms within your response where relevant.  

        Context: {context}  

        {chat_history}  
        Assistant:"""
        
        
        
        memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')

                   
        result = es.search(
            index=forms_index,
            query={"match": {"session_id": session_id}},
            sort="lastmodified_at:asc",
        )
        
        if result and len(result["hits"]["hits"]) > 0:
            doc_items = [
                json.loads(document["_source"]["history"])
                for document in result["hits"]["hits"]
            ]
        else:
            doc_items = []   
            
        uploaded_document_docs = []
        if doc_items:
            for i in doc_items:
                page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""

                # Ensure metadata is a dictionary
                metadata = i[0].get('metadata')
                if not isinstance(metadata, dict):
                    # Handle the case where metadata is not a dict
                    # Example: convert to dict if it's a stringified JSON
                    try:
                        metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                    except (json.JSONDecodeError, TypeError):
                        metadata = {}

                uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
    
        # combined_docs = uploaded_document_docs + primary_docs_text
        combined_docs = uploaded_document_docs
        logger.info('combined_docs%s'%combined_docs)


        memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
        chat_history = memory_variables['chat_history']
        formatted_template = template.format(
            context=combined_docs, 
            chat_history=chat_history,
            # question=user_prompt
        )
        
        assistant_response=get_resptable(formatted_template)
        print('assistant_response',assistant_response)
        output_token_cost_gpt4o = num_tokens_from_string(assistant_response, 'gpt-4o')
        
        query = {
            "query": {
                "bool": {
                    "must": [
                        # {"term": {"user_id.keyword": alliance_number}},
                        {"term": {"username": candidate}},
                    ]
                }
            }
        }

        # Search the index for matching records
        response = es.search(index=user_details_index, body=query)

        assigned_tokens=0
        overall_tokens=0
        # Process the results
        for hit in response["hits"]["hits"]:
            assigned_tokens = hit["_source"].get("assigned_tokens", 0)
            overall_tokens = hit["_source"].get("overall_tokens", 0)
        
        logger.info('assigned_tokens %s'%assigned_tokens)
        logger.info('overall_tokens %s'%overall_tokens)
        logger.info('output_token_cost_gpt4o %s'%output_token_cost_gpt4o)

        if assigned_tokens-(overall_tokens+output_token_cost_gpt4o+input_token_cost_gpt4o) <0:
            print('if satisfied',candidate)
            print('output_token_cost_gpt4o',output_token_cost_gpt4o)

            response = es.search(
            index=user_details_index,
            body={
                    "query": {
                        "bool": {
                            "must": [
                                # {"term": {"user_id.keyword": alliance_number}},
                                {"term": {"username": candidate}},
                            ]
                        }
                    },
                }
            )
        
            if response['hits']['total']['value'] > 0:
                userdoc_id = response['hits']['hits'][0]['_id']
                print('userdoc_id',userdoc_id)
                # Use script to either add or update 'limits_token'
                es.update(
                    index=user_details_index,
                    id=userdoc_id,
                    body={
                        "script": {
                            "source": """
                                if (ctx._source.containsKey('limits_token')) {
                                    // If limits_token exists, update its value
                                    ctx._source.limits_token = params.limits_token;
                                } else {
                                    // If limits_token doesn't exist, create it and set the value
                                    ctx._source['limits_token'] = params.limits_token;
                                }
                            """,
                            "params": {
                                "limits_token": 0
                            }
                        }
                    }
                )

            
            # Search the index for matching records
            response = es.search(index=user_details_index, body=query)

            return JsonResponse({'response': 'Your token limit has been reached.','session_id':session_id})
        else:
            # logger.info('output_token_cost_gpt4o to sys es %s'%output_token_cost_gpt4o)
            # org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
            # logger.info('output_token_cost_gpt4o org es %s'%output_token_cost_gpt4o)
            # user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
            return JsonResponse({'response': assistant_response,'session_id':session_id})   
    return JsonResponse({'error': 'Invalid request method'}, status=405)
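
# --- Hypothetical refactor sketch (not wired in) ---------------------------
# The token-budget check above is duplicated verbatim in the 'action4'
# handler. A minimal shared helper could look like the sketch below; it
# assumes the module-level `es` client and `user_details_index` already used
# throughout this file, and simplifies the Painless script to a plain
# partial-document update (equivalent, since assignment creates the field).
def check_token_budget_sketch(candidate, input_tokens, output_tokens):
    """Return True if `candidate` still has budget for this exchange.

    When the budget is exhausted, persist limits_token = 0 on the user's
    record so activate_query_conversation() deactivates the chat.
    """
    query = {"query": {"bool": {"must": [{"term": {"username": candidate}}]}}}
    response = es.search(index=user_details_index, body=query)
    assigned_tokens = overall_tokens = 0
    for hit in response["hits"]["hits"]:
        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
        overall_tokens = hit["_source"].get("overall_tokens", 0)
    if assigned_tokens - (overall_tokens + input_tokens + output_tokens) < 0:
        if response["hits"]["total"]["value"] > 0:
            es.update(
                index=user_details_index,
                id=response["hits"]["hits"][0]["_id"],
                body={"doc": {"limits_token": 0}, "doc_as_upsert": True},
            )
        return False
    return True
# Callers would then replace the inlined block with:
#     if not check_token_budget_sketch(candidate, input_token_cost_gpt4o, output_token_cost_gpt4o):
#         return JsonResponse({'response': 'Your token limit has been reached.', 'session_id': session_id})
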

@csrf_exempt
def text_to_speech(request):
    if request.method == 'POST':
        input_text = request.POST.get('message')
        if not input_text:
            return JsonResponse({'error': 'No input text provided'}, status=400)

        try:
            # Fetch TTS from OpenAI
            response = openai.audio.speech.create(
                model="tts-1",
                voice="alloy",
                input=input_text
            )
            # Stream the audio content to memory
            audio_content = response.content
            # Convert the audio content to base64
            base64_encoded_audio = base64.b64encode(audio_content).decode('utf-8')

            return JsonResponse({'audio': base64_encoded_audio})
        except Exception as e:
            return JsonResponse({'error': str(e)}, status=500)
    return JsonResponse({'error': 'Invalid request method'}, status=405)


@csrf_exempt
def speech_to_text(request):
    if request.method == 'POST':
        if 'file' in request.FILES:
            audio_file = request.FILES['file']
            try:
                # Convert InMemoryUploadedFile to bytes
                audio_bytes = audio_file.read()

                client = OpenAI()
                transcription = client.audio.transcriptions.create(
                    model="whisper-1", 
                    file=(audio_file.name, audio_bytes),
                    response_format="text"
                )
                return JsonResponse({'transcription': transcription})
            except Exception as e:
                return JsonResponse({'error': str(e)}, status=500)
        else:
            return JsonResponse({'error': 'No file provided'}, status=400)
    else:
        return JsonResponse({'error': 'Invalid request method'}, status=405)


def today_messages(request):
    try:
        t=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
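        # Keep only items whose lastmodified_at (epoch millis) falls within
        # today's UTC window: [midnight today, now].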
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_today <= lastmodified_at_datetime <= current_datetime:
                t.append(item)
        # print('to',t)
        if t:
            #session_ids = list(set(d["session_id"] for d in t))
            if username =='image_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    tot_hist=[]
                    imtex={}
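                    # Each stored message bundles typed entries ('ai', 'system',
                    # 'human'); base64 image payloads have their data-URL prefix
                    # stripped so the client receives raw base64.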
                    for document in result["hits"]["hits"]:
                        
                        for j in document["_source"]["messages"]:
                                logger.info('retrieved data %s'%j)
                                for i in j['data']['content']:
                                    logger.info('second retrieved %s'%i)
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                    elif i['type']=='human':
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'

                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    else:
                        session_hist1 = []
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'

                        }
                        
                        items.append(yest_dict)
            elif username =='forms_log':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    print('id_user',id_user)
                    result1 = es.search(
                        index=forms_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    else:
                        session_hist1 = []
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        # print('session_hist',session_hist)
                        for i in session_hist[1:]:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == SystemMessage:
                                 chat_h= {
                                     'system': i.content}
                            elif type(i) == AIMessage:
                                  chat_h= {
                                      'assistant': i.content}

                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'forms_log'
                        }
                        items.append(yest_dict)
                        
            else:
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'alli_log'
                        }
                        
                        items.append(yest_dict)
                

            # Returning the first message content and the session ID for simplicity
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        return  JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        logger.exception('today_messages error: %s' % e)
        # Log the error and return a JSON error response
        return JsonResponse({"error": str(e)}, status=500)

def yesterday_messages(request):
    try:
        y=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        end_of_yesterday = start_of_today - timedelta(microseconds=1) 
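        # Yesterday's window: [midnight yesterday, one microsecond before
        # midnight today].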
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_yesterday <= lastmodified_at_datetime <= end_of_yesterday:
                y.append(item)
        if y:
            if username =='image_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    tot_hist=[]
                    imtex={}
                    for document in result["hits"]["hits"]:
                        for j in document["_source"]["messages"]:
                                for i in j['data']['content']:
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                    elif i['type']=='human':
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'
                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    else:
                        session_hist1 = []
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'
                        }
                        
                        items.append(yest_dict)
            elif username =='forms_log':
                 session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                 yest_dict = {}
                 items=[]
                 for id_user in session_ids[::-1]:
                     print('id_user',id_user)
                     result1 = es.search(
                         index=forms_index,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result1 and len(result1["hits"]["hits"]) > 0:
                         session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                     else:
                         session_hist1 = []
                     
                     tot_hist=[]
                     chat_h={}
                     for i in session_hist1:
                         chat_h = {
                             'file': i}
                         tot_hist.append(chat_h)
                     
                     result = es.search(
                         index=es_index_name,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result and len(result["hits"]["hits"]) > 0:
                         session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                           for document in result["hits"]["hits"])
     
                         # tot_hist=[]
                         # chat_h={}
                         # print('session_hist',session_hist)
                         for i in session_hist[1:]:
                             if type(i) == HumanMessage:
                                 chat_h = {
                                     'user': i.content}
                             elif type(i) == SystemMessage:
                                  chat_h= {
                                      'system': i.content}
                             elif type(i) == AIMessage:
                                   chat_h= {
                                       'assistant': i.content}

                             tot_hist.append(chat_h)
                         
                         chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
     
                         yest_dict = {
                             'session_id': id_user,
                             'chat_history':tot_hist,
                             'content': chat_id_names,
                             'app_switch':'forms_log'
                         }
                         items.append(yest_dict)
            else:
                # session_ids = list(set(d["session_id"] for d in y))
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
    
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'alli_log'

                        }
                        
                        items.append(yest_dict)
                   

            # Returning the first message content and the session ID for simplicity
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        return  JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        logger.exception('yesterday_messages error: %s' % e)

        return JsonResponse({"error": str(e)}, status=500)
    
def previous_7_days(request):
    try:
        s=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)     
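        # Window: [seven days ago, start of yesterday), i.e. older than
        # yesterday but within the last week.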
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_seven_days_ago <= lastmodified_at_datetime < start_of_yesterday:
                s.append(item)

        if s:
            if username =='image_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    tot_hist=[]
                    imtex={}
                    for document in result["hits"]["hits"]:
                        for j in document["_source"]["messages"]:
                                for i in j['data']['content']:
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                    elif i['type']=='human':
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': i['data'][1]['image_url']['url'].replace('data:image/jpeg;base64,', '')}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'

                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        # logger.info('do ret %s'%result1["hits"]["hits"])
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"] if 'filename' in document["_source"]]
                    else:
                        session_hist1 = []
                    

                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)

                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )

                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'

                        }
                        
                        items.append(yest_dict)

            elif username =='forms_log':
                 session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                 yest_dict = {}
                 items=[]
                 for id_user in session_ids[::-1]:
                     print('id_user',id_user)
                     result1 = es.search(
                         index=forms_index,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result1 and len(result1["hits"]["hits"]) > 0:
                         session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                     else:
                         session_hist1 = []
                     
                     tot_hist=[]
                     chat_h={}
                     for i in session_hist1:
                         chat_h = {
                             'file': i}
                         tot_hist.append(chat_h)
                     
                     result = es.search(
                         index=es_index_name,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result and len(result["hits"]["hits"]) > 0:
                         session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                           for document in result["hits"]["hits"])
     
                         # tot_hist=[]
                         # chat_h={}
                         # print('session_hist',session_hist)
                         for i in session_hist[1:]:
                             if type(i) == HumanMessage:
                                 chat_h = {
                                     'user': i.content}
                             elif type(i) == SystemMessage:
                                  chat_h= {
                                      'system': i.content}
                             elif type(i) == AIMessage:
                                   chat_h= {
                                       'assistant': i.content}

                             tot_hist.append(chat_h)
                         
                         chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
     
                         yest_dict = {
                             'session_id': id_user,
                             'chat_history':tot_hist,
                             'content': chat_id_names,
                             'app_switch':'forms_log'
                         }
                         items.append(yest_dict)
                         
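            # Default: plain chat sessions, tagged 'alli_log' for the client.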
            else:
                session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in s))

                yest_dict = {}
                items = []
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(
                            [json.loads(document["_source"]["history"])
                             for document in result["hits"]["hits"]])

                        tot_hist = []
                        for message in session_hist:
                            if isinstance(message, HumanMessage):
                                tot_hist.append({'user': message.content})
                            elif isinstance(message, AIMessage):
                                tot_hist.append({'assistant': message.content})

                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history': tot_hist,
                            'content': chat_id_names,
                            'app_switch': 'alli_log',
                        }
                        items.append(yest_dict)
                   

            # Return every session's chat history. Note the payload nests
            # 'messages' twice: JsonResponse wraps formatted_yest_dict, which
            # already has a 'messages' key.
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}

        return JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        logger.error('previous 7 days error: %s' % e)
        return JsonResponse({"error": str(e)}, status=500)

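
# The three branches above repeat one fetch-and-format loop. Below is a minimal
# sketch of a shared helper, assuming (as the code above does) that each
# Elasticsearch hit stores a single serialized LangChain message in its
# 'history' field. The helper name and the role_map/skip_first parameters are
# illustrative, not part of the existing code.
def _session_transcript(es_client, index_name, session_id,
                        role_map=None, skip_first=False):
    """Fetch one session's messages and convert them to role-keyed dicts."""
    role_map = role_map or {HumanMessage: 'user', AIMessage: 'assistant'}
    result = es_client.search(
        index=index_name,
        query={"match": {"session_id": session_id}},
        sort="lastmodified_at:asc",
    )
    hits = result["hits"]["hits"] if result else []
    messages = messages_from_dict(
        [json.loads(hit["_source"]["history"]) for hit in hits])
    if skip_first:
        messages = messages[1:]  # drop the first stored message (forms_log behaviour)
    transcript = []
    for message in messages:
        # Map each message type to its role key; unknown types are skipped.
        for message_cls, role in role_map.items():
            if isinstance(message, message_cls):
                transcript.append({role: message.content})
                break
    return transcript

# Hypothetical usage mirroring the 'forms_log' branch:
#   tot_hist += _session_transcript(
#       es, es_index_name, id_user,
#       role_map={HumanMessage: 'user', SystemMessage: 'system', AIMessage: 'assistant'},
#       skip_first=True,
#   )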