# views.py
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import ChatMessage
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
from datetime import timedelta,datetime,timezone
from time import time,sleep
from langchain.chat_models import ChatOpenAI
import os, sys, subprocess
# from langchain_community.vectorstores import FAISS
import io

# import uuid
from langchain_core.messages import (
    BaseMessage,
    message_to_dict,
    messages_from_dict,
)
from glob import glob
from langchain.prompts import ChatPromptTemplate,PromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import ElasticsearchStore
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.messages.human import HumanMessage
from langchain_core.messages.ai import AIMessage
import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch,NotFoundError
from collections import OrderedDict
from decouple import config
from openai import OpenAI
from PIL import Image
from io import BytesIO
from langchain_core.messages.system import SystemMessage
sys.path.append(os.path.join(os.path.dirname(__file__)))
# from index_attached import start_indexing
from langchain_core.documents import Document
from Loader_functions_utils import DocumentLoader
document_loader = DocumentLoader()
from langchain.docstore.document import Document as BaseDocument
from langchain.load.dump import dumps
import tiktoken

import base64
import openai
import LogUtils
logger = LogUtils.getRootLogger()

def chatbot(request):
    return render(request, 'index.html')



# Fetch all messages for a username from the given index, oldest first
def get_messages_by_user_and_session(username,ind_name):
    query = {
       "query": {
           "bool": {
               "must": [
                   {"match": {"username": username}},
               ]
           }
       },
       "size":10000,
       "sort": [
           {"lastmodified_at": {"order": "asc"}}
       ]
    }
    # Execute the search query
    response = es.search(index=ind_name, body=query)

    # Extract and return the messages
    messages = [hit['_source'] for hit in response['hits']['hits']]
    return messages
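
# Illustrative usage (hedged; the username is invented):
#   msgs = get_messages_by_user_and_session('jdoe', es_index_name)
#   returns up to 10,000 hits for that user, oldest first by lastmodified_at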

  
    
os.environ["OPENAI_API_KEY"] = config('SECRET_KEY')
logger.info('OpenAI API key loaded from config')
embeddings = OpenAIEmbeddings()
es_index_name ='allibot_v2_gpt'
chatname_index='get_chatid'
imtext_index='textlens'
doc_index='documents_up'
# logger.info(es_index_name)
# Elasticsearch connection; the URL (with basic-auth credentials) can be overridden via
# config — the 'ELASTIC_URL' key name is an assumption, defaulting to the original endpoint.
es = Elasticsearch(
    config('ELASTIC_URL', default='https://elastic:8oKIqy312EBsAPzWT64NUzji@scic-elasticsearch.es.us-central1.gcp.cloud.es.io:443'),
    request_timeout=300,
    retry_on_timeout=True,
)

def check_elastic_status():
    # Lightweight reachability check that reuses the configured client
    try:
        return es.ping()
    except Exception:
        return False

try:
    db = ElasticsearchStore(
        es_connection=es,
        index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others'],
        embedding=embeddings,
        strategy=ElasticsearchStore.ExactRetrievalStrategy()
    )
except Exception as ex:
    logger.exception('Exception occurred due to %s' % ex)
    # Retry once if the cluster is reachable
    if check_elastic_status():
        db = ElasticsearchStore(
            es_connection=es,
            index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others'],
            embedding=embeddings,
            strategy=ElasticsearchStore.ExactRetrievalStrategy()
        )
    else:
        db = None  # views that use `db` will fail fast with a clear error
 
def unique_sources_with_pages(meta_data):
    unique_sources = set()
    
    for item in meta_data:
        source = item.get('source', '') 
        # print('main',source)
        filename = os.path.basename(source)  # Get the file name with extension
        name_without_ext = os.path.splitext(filename)[0]  
        # print('name_without_ext',name_without_ext)
        unique_sources.add(name_without_ext)
    
    # Convert the set of unique sources to a comma-separated string
    unique_sources_str = ', '.join(unique_sources)
    
    return unique_sources_str
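
# Illustrative example (paths invented): given
#   [{'source': '/data/forms/CG0001.pdf'}, {'source': '/data/guides/study.docx'}]
# this returns something like 'CG0001, study' (set order is not guaranteed).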

def get_chat_id_names_by_session_id(index_name, session_id):
    query = {
        "size": 5000,
        "query": {"term": {"session_id.keyword": session_id}}  # Match the exact session ID
    }    
    response = es.search(index=index_name, body=query)
    
    chat_id_name=''
    for hit in response['hits']['hits']:
        source = hit['_source']
        if 'chat_id_name' in source:
            chat_id_name = source.get('chat_id_name')
       
    return chat_id_name


def add_user_message_to_es(customer,username, session_id, user_prompt,index,user_token,modelname):
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(HumanMessage(content=user_prompt)),
            ensure_ascii=True,
        ),
        "human token" : user_token,
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=index, body=doc)
    return res

def add_assistant_message_to_es(customer,username, session_id, answer,assistant_token,modelname):
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(AIMessage(content=answer)),
            ensure_ascii=True,
        ),
        "assistant token" : assistant_token,
        "feedback" : "null",

    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res

def search_by_session_id(index_name, session_id):
    # Define the query
    query = {
        "size": 10000,
        "query": {
            "match": {
                "session_id": session_id
            }
        },
        "sort": [
            {
                "lastmodified_at": {
                    "order": "asc"
                }
            }
        ]
    }

    # Perform the search
    response = es.search(index=index_name, body=query)

    return response['hits']['hits']

def document_to_dict(doc):
    # Convert Document to a string representation if storing as text
    return {
        'page_content': doc.page_content,
        'metadata': json.dumps(doc.metadata) if hasattr(doc, 'metadata') else '',  # Convert metadata to JSON string
    }

def add_document_ai_to_es(username, session_id, docs1, es_index_name,orderid):
    # Convert docs1 to a list of dictionaries as strings
    documents_dicts = [document_to_dict(doc) for doc in docs1]
    
    # Create a document with the required fields
    doc = {
        'username': username,
        'session_id': session_id,
        'user_prompt': orderid,  # Store as a JSON string
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(documents_dicts, ensure_ascii=True),
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res


def update_lastmodified_at(index_name, doc_id):
    # Get current timestamp in milliseconds
    current_timestamp = int(datetime.now().timestamp() * 1000)
    
    # Define the update body
    update_body = {
        "doc": {
            "lastmodified_at": current_timestamp  # Update with timestamp in milliseconds
        }
    }

    # Perform the update
    es.update(index=index_name, id=doc_id, body=update_body)
    
def update_create_at_with_sessionid(index_name,session_id):
    documents = search_by_session_id(index_name, session_id)
    # Update 'lastmodified_at' field for each matched document
    for doc in documents:
        doc_id = doc['_id']  # Get the document ID
        update_lastmodified_at(index_name, doc_id)

def fix_syntax_error(text):
    # Escape double quotes so the text can be embedded safely in quoted strings
    fixed_text = text.replace('"', '\\"')
    return fixed_text        
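
# Illustrative example: fix_syntax_error('say "hi"') returns 'say \"hi\"',
# suitable for embedding inside an already double-quoted string.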

def resize_image(image_path, size=(224, 224)):
    with Image.open(image_path) as img:
        img.thumbnail(size, Image.LANCZOS)
        buffer = BytesIO()
        img.save(buffer, format="BMP")
        return buffer.getvalue()

def encode_image(image_data):
    return base64.b64encode(image_data).decode("utf-8")

def get_current_iso_date():
    return datetime.utcnow().isoformat()    

def store_chat_history(es, index_name, chat_entry):
    es.index(index=index_name, document=chat_entry)

def retrieve_chat_data(indexname,session_id):
    # Fetch data from Elasticsearch; es.get raises NotFoundError for a missing id
    try:
        response = es.get(index=indexname, id=session_id)
    except NotFoundError:
        return None, None

    chat_data = response["_source"]
    return chat_data["documents"], chat_data["chat_history"]



def chat_hist_img(base64_image,user_id,message,response,session_id,index_name_img,es):
    # Store the initial chat history
    chat_entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        #"timestamp": get_current_iso_date(),  # Ensure this is a string, not a function
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response)
        ],
        "img": base64_image
    }
    
    store_chat_history(es, index_name_img, chat_entry)
  
  
def img_message_to_dict(message):
    if isinstance(message, HumanMessage):
        content = message.content
        if isinstance(content, list):
            content = " ".join([part['text'] for part in content if part['type'] == 'text'])
        return {"type": "human", "data": {"content": content}}
    elif isinstance(message, AIMessage):
        content = message.content
        if isinstance(content, list):
            content = " ".join([part['text'] for part in content if part['type'] == 'text'])
        return {"type": "ai", "data": {"content": content}}
    else:
        raise ValueError(f"Unexpected message type: {type(message)}")
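
# Illustrative example (content invented): a multimodal message such as
#   HumanMessage(content=[{"type": "text", "text": "hi"}, {"type": "image_url", "image_url": {...}}])
# is flattened to {"type": "human", "data": {"content": "hi"}}; image parts are dropped.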



def img_message_from_dict(message_dict):
    # Check if the input is a list of messages
    if isinstance(message_dict, list):
        return [img_message_from_dict(m) for m in message_dict]
    
    message_type = message_dict.get("type")
    if message_type == "human":
        return HumanMessage(content=message_dict["data"]["content"])
    elif message_type == "ai":
        return AIMessage(content=message_dict["data"]["content"])
    else:
        raise ValueError(f"Got unexpected message type: {message_type}")    


def retrieve_chat_history(es, index_name, session_id):
    res = es.search(index=index_name, body={
        "query": {
            "match": {"session_id": session_id}
        },
        "size": 5000
    })
    
    return res['hits']['hits']

def chat_hist_txt(user_id,session_id,message,response,es,index_name):
    # Store the initial chat history
    chat_entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        # "timestamp": get_current_iso_date(),  # Ensure this is a string, not a function
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response)
        ],
    }
    
    store_chat_history(es, index_name, chat_entry)  
    
def encode_image1(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
    
def avi_to_base64(file):
    with open(file, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")
    
def resize_and_convert_image(image_path):
    with Image.open(image_path) as img:
        # Resize the image if it exceeds a certain size (e.g., width or height > 1024)
        max_size = (1024, 1024)
        img.thumbnail(max_size, Image.LANCZOS)

        # Convert BMP images to JPEG
        if img.format == 'BMP':
            img = img.convert('RGB')  # JPEG does not support BMP palettes/alpha
            converted_path = os.path.splitext(image_path)[0] + '.jpeg'
            img.save(converted_path, format='JPEG')
            os.remove(image_path)
            return converted_path
        else:
            # Save the resized image back to the original path in its own format
            img.save(image_path)
            return image_path
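
# Note: for BMP inputs the original file is deleted and a .jpeg is written, so
# callers should use the returned path, e.g. path = resize_and_convert_image(path).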
        
def add_documents(file_name1):
    _, extension_file_name1 = os.path.splitext(file_name1)
    resp=1
    docs1=''
    # Load the first file
    try:
        if extension_file_name1.lower() == ".xlsx":
            docs1 = document_loader.excel_file_loader(file_name1)
        elif extension_file_name1.lower() == ".csv":
            docs1 = document_loader.csv_file_loader(file_name1)
        elif extension_file_name1.lower() == ".tsv":
            docs1 = document_loader.tsv_file_loader(file_name1)
        elif extension_file_name1.lower() in [".doc", ".docx"]:
            docs1 = document_loader.doc_docx_file_loader(file_name1)
        elif extension_file_name1.lower() == ".pdf":
            docs1 = document_loader.pdf_file_loader(file_name1)
        elif extension_file_name1.lower() == ".pptx":
            docs1 = document_loader.pptx_file_loader(file_name1)
        elif extension_file_name1.lower() == ".xml":
            docs1 = document_loader.xml_file_loader(file_name1)
        elif extension_file_name1.lower() == ".json":
            docs1 = document_loader.json_file_loader(file_name1)
        elif extension_file_name1.lower() == ".txt":
            docs1 = document_loader.text_file_loader(file_name1)
        elif extension_file_name1.lower() == ".md":
            docs1 = document_loader.markdown_file_loader(file_name1)
        elif extension_file_name1.lower() == ".html":
            docs1 = document_loader.html_file_loader(file_name1)
        else:
            raise ValueError(f"Unsupported file type: {extension_file_name1}")
    except Exception as e:
        logger.exception('Failed to load %s: %s' % (file_name1, e))
        resp=0

    return resp,docs1
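
# Illustrative usage (path invented):
#   ok, docs = add_documents('/tmp/policy.pdf')
#   ok == 1 on success (docs is the loader's list of Document chunks), 0 on failure.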

def query_and_combine(past_messages, user_query):
    llm = ChatOpenAI(
            model="gpt-4o",
        )
    # Specify the search type (e.g., 'similarity' or 'exact_match')
    search_type = 'similarity'  # Adjust this based on your requirements
    
    # Use the search method with the search_type argument
    retrieved_data = db.search(user_query, search_type=search_type)

    # Format the retrieved data
    formatted_data = f"{retrieved_data}"
    
    # Combine past messages with the retrieved data
    combined_messages = past_messages + [
        AIMessage(content=formatted_data)
    ]
    
    # print("combined_messages",combined_messages)
    # Pass the combined data to ChatOpenAI
    response = llm.invoke(combined_messages)
    return response
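
# Illustrative flow (hedged): the retrieved chunks are stringified and appended
# as an AIMessage before invoking the model, e.g.
#   reply = query_and_combine([HumanMessage(content="What does form CG0001 cover?")],
#                             "What does form CG0001 cover?")
#   reply.content  # answer grounded in the retrieved snippets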

def delete_document_content(index_name, session_id,condition_check):
    # Match documents with the given session_id whose user_prompt equals condition_check
    query = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"session_id": session_id}},
                    {"match": {"user_prompt": condition_check}}
                ]
            }
        }
    }

    # Perform the delete_by_query operation
    delete_response = es.delete_by_query(index=index_name, body=query)
    
    # Check if any documents were deleted
    deleted_count = delete_response['deleted']
    
    if deleted_count > 0:
        print(f"Successfully deleted {deleted_count} documents with session_id '{session_id}' and user_prompt '{condition_check}'.")
    else:
        print(f"No documents found with session_id '{session_id}' and user_prompt '{condition_check}'.")



def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = len(encoding.encode(string))
    return num_tokens
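
# Illustrative usage (the exact count depends on the tokenizer version):
#   num_tokens_from_string("How do I file a claim?", "gpt-4o")  # -> a small int, e.g. 7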

def org_token(input_tokens, output_tokens, org_name):
    # Search for the document with the given organization name
    org_toks = es.search(
        index="organization_level_token_count",
        body={
            'query': {
                'match': {
                    'organization_name': org_name
                }
            }
        }
    )['hits']['hits']
    if org_toks:
        # Take the last hit; the query is expected to match a single document per organization
        doc = org_toks[-1]
        doc_id = doc['_id']
        doc_source = doc['_source']
        # Retrieve existing input/output tokens with default values
        inp = doc_source.get('input_overall_tokens', 0)
        outp = doc_source.get('output_overall_tokens', 0)
 
        # Update the token values
        input_overall_tokens = input_tokens + inp
        output_overall_tokens = output_tokens + outp
 
        # Document to be updated
        update_doc = {
            'doc': {
                'output_overall_tokens': output_overall_tokens,
                'input_overall_tokens': input_overall_tokens,
                'overall_tokens': output_overall_tokens + input_overall_tokens,
                'organization_name': org_name
            }
        }
 
        # Update the document using its ID
        res = es.update(index="organization_level_token_count", id=doc_id, body=update_doc)
        print(f"Updated document ID: {doc_id}, Response: {res}")
        
    
    
@csrf_exempt
def allibot(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            # request.session.clear()
            if action_type == 'action1':
                username =request.POST.get('username')
                session_id=request.POST.get('session_id')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')


                # chat_history_1 = ElasticsearchChatMessageHistory(es_connection=es,index=es_index_name, session_id=session_id)
                user_prompt = request.POST.get('message')
                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4_0125_preview = num_tokens_from_string(user_prompt, 'gpt-4-0125-preview') 
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4_0125_preview,'gpt-4-0125-preview')                
                
                template = """Answer the question in your own words as truthfully as possible from the context given to you. If there is any MCQ question explain why the choice is correct.
                If you do not know the answer to the question, simply respond with "I don't know. Can you ask another question".
                Response must be around 200 words. It must be written in paragraphs and must not exceed 4 paragraphs.
                Give a line of space between the paragraphs.
                If questions are asked where there is no relevant context available, simply respond with "That's a great question! I'm not sure of the answer right now.  Can you ask your question a different way? I am excited to assist you further!"
                Context: {context}
        
        
                {chat_history}
                Human: {question}
                Assistant:"""

                prompt = PromptTemplate(input_variables=["context", "chat_history", "question"], template=template)
            
                memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
                            
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": session_id}},  # Use match instead of term
                    sort="lastmodified_at:asc",
                    size=1000,  # avoid the default 10-hit cap on history
                )
                if result and len(result["hits"]["hits"]) > 0:
                    items = [
                        json.loads(document["_source"]["history"])
                        for document in result["hits"]["hits"]
                    ]
                else:
                    items = []
    
                session_chat_hist=messages_from_dict(items)
                if session_chat_hist:
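                    # Skip the just-stored user prompt; the chain receives the current question separately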
                    for i in range(0,len(session_chat_hist)-1):
                        if type(session_chat_hist[i]) == HumanMessage:
                            memory.chat_memory.add_user_message(session_chat_hist[i].content)
                        elif type(session_chat_hist[i]) == AIMessage:
                            memory.chat_memory.add_ai_message(session_chat_hist[i].content)
                            
                qa = ConversationalRetrievalChain.from_llm(
                    llm=ChatOpenAI(model="gpt-4-0125-preview"),retriever=db.as_retriever(search_kwargs={"k": 4}), memory=memory,
                    combine_docs_chain_kwargs={'prompt': prompt},return_source_documents=True)
        
                formatted_response = qa(user_prompt)
                answer=formatted_response['answer']
                print('answer',answer)
                # chat_history_1.add_ai_message(answer)
                output_token_cost_gpt4_0125_preview = num_tokens_from_string(answer, 'gpt-4-0125-preview')

                add_assistant_message_to_es(candidate,username,session_id, answer,output_token_cost_gpt4_0125_preview,'gpt-4-0125-preview')
                org_token(input_token_cost_gpt4_0125_preview, output_token_cost_gpt4_0125_preview, org_name)                
                source=formatted_response['source_documents']
                # print('source',source)
                meta_data=[]
                for sor in source:
                    # print('meta',sor.metadata)
                    meta_data.append(sor.metadata)
                unique_entries_mod_str = unique_sources_with_pages(meta_data)
                try:
                    source_text = ''.join([doc.page_content for doc in formatted_response['source_documents']])
                    # Launch the relevancy scorer with an argument list (no shell), so
                    # user-controlled text cannot be shell-interpreted or break quoting
                    subprocess.Popen([
                        'python3', '/var/www/html/AlliBotV5_chatbot/AlliBotV50/Quesgen_answer_relevancy.py',
                        user_prompt, answer, source_text, unique_entries_mod_str,
                    ])
                except Exception as e:
                    logger.exception('Exception occurred due to %s' % e)
                    
                return JsonResponse({'response': answer,'session_id':session_id})

        return JsonResponse({'error': 'Invalid request'}, status=400)

    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
    
@csrf_exempt
def doccompare(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            if action_type == 'action1':          
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')


                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o')

                template = """Answer the question in your own words as truthfully as possible from the context given to you.
                You are a helpful assistant. Include the filename within your response where relevant.
                Response must be around 200 words. It must be written in paragraphs and must not exceed 4 paragraphs.
                Give a line of space between the paragraphs.
                If questions are asked where there is no relevant context available, simply respond with "That's a great question! 
                I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
                Context: {context}

                {chat_history}
                Human: {question}
                Assistant:"""
                
                memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')

                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": session_id}},
                    sort="lastmodified_at:asc",
                    size=1000,
                )

                if result and len(result["hits"]["hits"]) > 0:
                    items = [
                        json.loads(document["_source"]["history"])
                        for document in result["hits"]["hits"]
                    ]
                else:
                    items = []

                session_chat_hist = messages_from_dict(items)
                if session_chat_hist:
                    # Skip the just-stored user prompt; it is passed separately as {question}
                    for i in range(0,len(session_chat_hist)-1):
                        if type(session_chat_hist[i]) == HumanMessage:
                            memory.chat_memory.add_user_message(session_chat_hist[i].content)
                        elif type(session_chat_hist[i]) == AIMessage:
                            memory.chat_memory.add_ai_message(session_chat_hist[i].content)
                            
                           
                result = es.search(
                    index=doc_index,
                    query={"match": {"session_id": session_id}},
                    sort="lastmodified_at:asc",
                    size=1000,
                )
                
                if result and len(result["hits"]["hits"]) > 0:
                    doc_items = [
                        json.loads(document["_source"]["history"])
                        for document in result["hits"]["hits"]
                    ]
                else:
                    doc_items = []   
                    
                uploaded_document_docs = []
                # Each history entry holds the full list of chunks for one upload;
                # iterate every chunk rather than only the first
                for chunks in doc_items:
                    for chunk in chunks:
                        page_content = str(chunk.get('page_content')) if chunk.get('page_content') else ""

                        # Ensure metadata is a dictionary (it may be stringified JSON)
                        metadata = chunk.get('metadata')
                        if not isinstance(metadata, dict):
                            try:
                                metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                            except (json.JSONDecodeError, TypeError):
                                metadata = {}

                        uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
            
                db = ElasticsearchStore(
                    es_connection=es,
                    # index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others'],
                    index_name=['cml_study_guide', 'docx_pdf', 'new_pdf', 'pl_pilot_24_02_21_lg','botcoach_index','mcq','pdf_test','docx_test','excel_test','csv_test','xlsx','word_docx','pdf_docs','html_unstructured1'],
                    embedding=embeddings,
                    strategy=ElasticsearchStore.ExactRetrievalStrategy()
                )
                            
                primary_docs= db.as_retriever(search_kwargs={"k": 2}).get_relevant_documents(user_prompt)
                primary_docs_text = []

                for doc in primary_docs:
                    # Parse the JSON content from page_content
                    primary_docs_text.append(BaseDocument(page_content=doc.page_content))  # or handle differently


                combined_docs = uploaded_document_docs + primary_docs_text
                # print('uploaded_document_docs',uploaded_document_docs)
                # print('combined_docs',combined_docs)

                client = OpenAI()

                memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
                chat_history = memory_variables['chat_history']
                print('chat_history',chat_history)
                formatted_template = template.format(
                    context=combined_docs, 
                    chat_history=chat_history,
                    question=user_prompt
                )


                # Request the completion (non-streaming)
                response = client.chat.completions.create(
                    model="gpt-4o",
                    messages=[{"role": "user", "content": formatted_template}],
                    stream=False,
                )

                assistant_response = response.choices[0].message.content
                output_token_cost_gpt4o =  num_tokens_from_string(assistant_response, 'gpt-4o') 
                add_assistant_message_to_es(candidate,username,session_id, assistant_response,output_token_cost_gpt4o,'gpt-4o')
                org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)

                return JsonResponse({'response': assistant_response,'session_id':session_id})
            
            elif action_type == 'action2':
                # print('entered second condition')
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                # print("conver_id",conver_id)
                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'upload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                # ind_response=start_indexing(upload_dir,imtext_index,conver_id,username)
                delete_document_content(doc_index,conver_id,'upload1')
                # delete_document_content(es_index_name,conver_id)

                # Note: only the first file in the upload directory is indexed
                ind_response1,docs1=add_documents(files_dir[0])
                file=os.path.basename(files_dir[0])
                # print('docs1',docs1)
                if docs1:
                    for doc in docs1:
                        doc.page_content = {
                            f"context of the first uploaded {file} document": doc.page_content
                        }
                    add_document_ai_to_es( username, conver_id, docs1,doc_index,'upload1')
                for f in files_dir:
                    os.remove(f)
                    
                if ind_response1==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg})

                else:
                    res_msg='unable to process upload1 documents.'
                    return JsonResponse({'status': 'failed', 'message':res_msg})
            
            elif action_type == 'action3':
                # print('entered third condition')
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                print("conver_id",conver_id)
                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'upload_files')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                
                delete_document_content(doc_index,conver_id,'upload2')
                files_dir = glob(upload_dir+'/*')
                # Note: only the first file in the upload directory is indexed
                ind_response2,docs2=add_documents(files_dir[0])
                file=os.path.basename(files_dir[0])
                if docs2:
                    for doc in docs2:
                        doc.page_content = {
                            f"context of the second uploaded {file} document": doc.page_content
                        }
                    add_document_ai_to_es( username, conver_id, docs2,doc_index,'upload2')
                
                for f in files_dir:
                    os.remove(f)
                # print("response status",ind_response2)
                if ind_response2==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg})

                else:
                    res_msg='unable to process upload2 documents.'
                    return JsonResponse({'status': 'failed', 'message':res_msg})

        return JsonResponse({'error': 'Invalid request'}, status=400)
    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})
    
@csrf_exempt
def imgtotext(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            # global session_id
            llm = ChatOpenAI(
                    model="gpt-4o",
                )
            # request.session.clear()
            if action_type == 'action1':
                # print('image to text section')

                username =request.POST.get('username')
                session_id=request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')


                
                update_create_at_with_sessionid(imtext_index, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'gpt-4o')
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gpt4o,'gpt-4o')
                
                message = HumanMessage(
                    content=[
                        {"type": "text", "text": user_prompt},
                    ],
                )

                # Retrieve chat history for context
                chat_history = retrieve_chat_history(es, imtext_index, session_id)
                # print('chat_history',chat_history)
                try:
                    # Convert retrieved history back to messages
                    past_messages = []
                    for entry in chat_history:
                        # Check if '_source' contains 'messages'
                        if "_source" in entry and "messages" in entry["_source"]:
                            messages = img_message_from_dict(entry["_source"]["messages"])
                            # Ensure messages is a list and extend the past_messages list
                            if isinstance(messages, list):
                                past_messages.extend(messages)
                            
                    # Add the new user input to the conversation
                    new_message = message
                    past_messages.append(new_message)
                    print('past_messages',past_messages)
                
                    # Get AI response
                    response = query_and_combine(past_messages,user_prompt)
                    # response = llm.invoke(past_messages)
                    chat_hist_txt(username,session_id,message,response,es,imtext_index)
                    # add_assistant_message_to_es(username,session_id, response.content)
                    output_token_cost_gpt4o = num_tokens_from_string(response.content, 'gpt-4o')

                    add_assistant_message_to_es(candidate,username,session_id, response.content,output_token_cost_gpt4o,'gpt-4o')
                    org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)

                    return JsonResponse({'response': response.content,'session_id':session_id})
                
                except Exception as e:
                    # History reconstruction failed; fall back to whatever was collected
                    logger.exception('Exception occurred due to %s' % e)
                    new_message = message
                    # response = llm.invoke(past_messages)
                    response = query_and_combine(past_messages,user_prompt)
                    chat_hist_txt(username,session_id,message,response,es,imtext_index)
                    # add_assistant_message_to_es(username,session_id, response.content)
                    output_token_cost_gpt4o = num_tokens_from_string(response.content, 'gpt-4o')
                    add_assistant_message_to_es(candidate,username,session_id, response.content,output_token_cost_gpt4o,'gpt-4o')
                    org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)

                    return JsonResponse({'response': response.content,'session_id':session_id})
                
            
            elif action_type == 'action2':
                # print('entered second condition')
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                user=request.POST.get('username')

                # print("conver_id",conver_id)
                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__), 'image_pth')
                os.makedirs(upload_dir, exist_ok=True)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                # ind_response=Image_indexing(upload_dir,'image_datas',conver_id)
                
                # Use glob to get all supported image files (jpg, jpeg, png, bmp)
                supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                image_files = []
                for ext in supported_formats:
                    image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))

                # Resize/convert each image in place; BMPs are rewritten as .jpeg on disk,
                # so the directory is re-globbed below to pick up the renamed files
                for image_file in image_files:
                    resize_and_convert_image(image_file)
                    
                response = None  # set only when an image file is processed below
                try:
                    for i in glob(upload_dir+'/*'):
                        if i.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
                            im64=encode_image1(i)
                            message = HumanMessage(
                                content=[
                                    {"type": "text", "text": "Extract the texts in the image."},
                                    {
                                        "type": "image_url",
                                        "image_url": {"url": f"data:image/jpeg;base64,{im64}"},
                                    },
                                ],
                            )
                            
                            response = llm.invoke([message])
                            chat_hist_img(im64,user,message,response,conver_id,imtext_index,es)
                        else:
                            ind_response1,docs1=add_documents(i)
                            file=os.path.basename(i)
                            message = HumanMessage(
                                content=[
                                    {"type": "text", "text": f"context of the uploaded {file} document"},
                                ],
                            )
                            # chat_hist_txt expects message objects, so wrap the extracted
                            # document text in an AIMessage instead of passing raw documents
                            doc_text = "\n".join(d.page_content for d in docs1) if docs1 else ""
                            chat_hist_txt(user,conver_id,message,AIMessage(content=doc_text),es,imtext_index)
                    ind_response=1
                except Exception as e:
                    logger.exception('Exception occurred due to %s' % e)
                    ind_response=0
                
                files_dir = glob(upload_dir+'/*')
                for f in files_dir:
                    os.remove(f)
                if ind_response==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg,'result':response.content if response is not None else ''})
                else:
                    res_msg='Unable to process image.'
                    return JsonResponse({'status': 'failed', 'message':res_msg,'result':''})

        return JsonResponse({'error': 'Invalid request'}, status=400)
    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})

@csrf_exempt
def text_to_speech(request):
    if request.method == 'POST':
        print('entered')
        input_text = request.POST.get('message')
        if not input_text:
            return JsonResponse({'error': 'No input text provided'}, status=400)

        try:
            print('input_text',input_text)
            # Fetch TTS from OpenAI
            response = openai.audio.speech.create(
                model="tts-1",
                voice="alloy",
                input=input_text
            )
            # Stream the audio content to memory
            audio_content = response.content
            # Convert the audio content to base64
            base64_encoded_audio = base64.b64encode(audio_content).decode('utf-8')

            return JsonResponse({'audio': base64_encoded_audio})
        except Exception as e:
            return JsonResponse({'error': str(e)}, status=500)
    return JsonResponse({'error': 'Invalid request method'}, status=405)

@csrf_exempt
def update_thumbs_flag(request):
    # Search for the document using session ID and answer field, then update its feedback flag
    try:
        # session_id,index_name,answer,
        if request.method == 'POST':
            print('thumbs flag')
            session_id = request.POST.get('session_id')
            index_name =request.POST.get('indexname')
            answer=request.POST.get('answer')
            thumbs_flag=request.POST.get('thumbs_flag')
            fixed_answer = fix_syntax_error(answer)
            # Give Elasticsearch time to index the just-written answer before searching
            sleep(10)
            
            response = es.search(
                index=index_name,
                body={
                    "query": {
                        "bool": {
                            "must": [
                                {"term": {"session_id": session_id}},  # assuming session_id is a keyword field
                                # {
                                #     "match": {
                                #         "history": {
                                #             "query": fixed_answer,
                                #             "operator": "AND"  # Match with AND operator
                                #         }
                                #     }
                                # }
                            ]
                        }
                    },
                    "size": 1000  # Adjust size to the number of documents you expect, e.g., 1000
                }
            )
            
            # print('response length',len(response))

            # Check if any documents are found
            total_docs = response['hits']['total']['value']
            logger.info('total_docs%s'%total_docs)
            if total_docs > 0:
                for hit in response['hits']['hits']:
                    doc_id = hit['_id']  # Get document ID
                    if 'answer' in hit['_source'] :
                        doc_source = hit['_source']['answer']  # Get the document source
                        if doc_source==answer:
                            # print('yes')
                            es.update(
                                index=index_name,
                                id=doc_id,
                                body={
                                    "doc": {
                                        "feedback": thumbs_flag,  # Update the feedback flag with the value 'good', 'bad', or 'no feedback'
                                    },
                                    "doc_as_upsert": True  # Create the document if it doesn't exist
                                }
                            )
                return JsonResponse({'status': 'success', 'message': 'Document updated'})
    
            else:
                return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)
        
    except NotFoundError:
        logger.exception("No documents found for session_id %s and the given answer" % session_id)
        return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)
    return JsonResponse({'status': 'failure', 'message': 'Invalid request method'}, status=405)


@csrf_exempt
def speech_to_text(request):
    if request.method == 'POST':
        if 'file' in request.FILES:
            audio_file = request.FILES['file']
            try:
                # Convert InMemoryUploadedFile to bytes
                audio_bytes = audio_file.read()

                client = OpenAI()
                transcription = client.audio.transcriptions.create(
                    model="whisper-1", 
                    file=(audio_file.name, audio_bytes),
                    response_format="text"
                )
                return JsonResponse({'transcription': transcription})
            except Exception as e:
                return JsonResponse({'error': str(e)}, status=500)
        else:
            return JsonResponse({'error': 'No file provided'}, status=400)
    else:
        return JsonResponse({'error': 'Invalid request method'}, status=405)


def today_messages(request):
    try:
        t=[]
        username = request.headers.get('X-Username')
        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index)
        else:
            test = get_messages_by_user_and_session(username,es_index_name)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_today <= lastmodified_at_datetime <= current_datetime:
                t.append(item)
        if t:
            #session_ids = list(set(d["session_id"] for d in t))
            if username =='image_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                        size=1000,
                    )
                    
                    tot_hist=[]
                    chat_h={}
                    imtex={}
                    for document in result["hits"]["hits"]:
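                        # Six _source fields indicate an image entry (it includes an 'img' field);
                        # five fields indicate a text-only entry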
                        if len(document["_source"])==6:
                            chat_h= {
                                'system': document["_source"]['img']}
                            if document["_source"]["messages"][0]['type']=='ai':
                                imtex={'assistant': document["_source"]["messages"][0]['data']['content']}
                            elif document["_source"]["messages"][1]['type']=='ai':
                                imtex={'assistant': document["_source"]["messages"][1]['data']['content']}
                            chat_h=chat_h,imtex
                        elif len(document["_source"])==5:
                            if document["_source"]['messages'][0]['type']=='ai':
                                chat_h= {'user': document["_source"]['messages'][1]['data']['content']},{'assistant': document["_source"]['messages'][0]['data']['content']}
                            else:
                                if len(document["_source"]['messages'])==2:
                                    chat_h= {'user': document["_source"]['messages'][0]['data']['content']},{'assistant': document["_source"]['messages'][1]['data']['content']}
                                else:
                                    chat_h= {'user': document["_source"]['messages'][0]['data']['content']}
                        tot_hist.append(chat_h)
                                
                    flattened_list = []
                    # Entries may be a (dict, dict) tuple or a single dict; flatten both
                    for entry in tot_hist:
                        if isinstance(entry, tuple):
                            flattened_list.extend(entry)
                        else:
                            flattened_list.append(entry)
                        
                    
                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':flattened_list,
                        'content': chat_id_names
                    }
                    
                    items.append(yest_dict)
            else:
                print('second condition')
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                        size=1000,
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {'user': i.content}
                            elif type(i) == AIMessage:
                                chat_h = {'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names
                        }
                        
                        items.append(yest_dict)
                

            # Package all sessions, newest first, for the response
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        # print('today',formatted_yest_dict)
        # print('today messge',formatted_yest_dict)
        return JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        print(f"Error in today_messages view: {e}")
        return JsonResponse({"error": str(e)}, status=500)

def yesterday_messages(request):
    try:
        y=[]
        username = request.headers.get('X-Username')
        print('username',username)
        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index)
        else:
            test = get_messages_by_user_and_session(username,es_index_name)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        end_of_yesterday = start_of_today - timedelta(microseconds=1) 
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_yesterday <= lastmodified_at_datetime <= end_of_yesterday:
                y.append(item)
        if y:
            if username =='image_logs':
                print('yesterday section')
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                        size=1000,
                    )
                    
                    tot_hist=[]
                    chat_h={}
                    imtex={}
                    for document in result["hits"]["hits"]:
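                        # Six _source fields indicate an image entry (it includes an 'img' field);
                        # five fields indicate a text-only entry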
                        if len(document["_source"])==6:
                            chat_h= {
                                'system': document["_source"]['img']}
                            if document["_source"]["messages"][0]['type']=='ai':
                                imtex={'assistant': document["_source"]["messages"][0]['data']['content']}
                            elif document["_source"]["messages"][1]['type']=='ai':
                                imtex={'assistant': document["_source"]["messages"][1]['data']['content']}
                            chat_h=chat_h,imtex
                        elif len(document["_source"])==5:
                            if document["_source"]['messages'][0]['type']=='ai':
                                chat_h= {'user': document["_source"]['messages'][1]['data']['content']},{'assistant': document["_source"]['messages'][0]['data']['content']}
                            else:
                                if len(document["_source"]['messages'])==2:
                                    chat_h= {'user': document["_source"]['messages'][0]['data']['content']},{'assistant': document["_source"]['messages'][1]['data']['content']}
                                else:
                                    chat_h= {'user': document["_source"]['messages'][0]['data']['content']}
                        tot_hist.append(chat_h)
                                
                    flattened_list = []                        
                    # Iterate over each tuple in the list
                    for item1, item2 in tot_hist:
                        # Extend the flattened list with both dictionaries
                        flattened_list.extend([item1, item2])
                        
                    
                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history': flattened_list,
                        'content': chat_id_names
                    }
                    
                    items.append(yest_dict)
            else:
                logger.info('second condition')
                session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in y))
    
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        # Each hit stores its chat history as a JSON string;
                        # decode every hit and flatten into one list of message
                        # dicts before rebuilding the LangChain message objects
                        session_hist = messages_from_dict([
                            msg
                            for document in result["hits"]["hits"]
                            for msg in json.loads(document["_source"]["history"])
                        ])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if isinstance(i, HumanMessage):
                                chat_h = {'user': i.content}
                            elif isinstance(i, AIMessage):
                                chat_h = {'assistant': i.content}
                            tot_hist.append(chat_h)
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history': tot_hist,
                            'content': chat_id_names
                        }
                        
                        items.append(yest_dict)
                   

            # Wrap the per-session items in the envelope the frontend expects
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        return JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        logger.error(f"Error in yesterday_messages view: {e}")
        return JsonResponse({"error": str(e)}, status=500)
    
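# Both window views fetch up to 10,000 of the user's documents and filter
# client-side. A hedged alternative sketch (assuming 'lastmodified_at' is
# indexed as epoch milliseconds, which the code above already relies on)
# pushes the window into the Elasticsearch query itself. The helper name
# `_search_user_window` is hypothetical and is not called by any view here:
def _search_user_window(ind_name, username, window_start, window_end):
    # Match the user and constrain the timestamp server-side
    query = {
        "bool": {
            "must": [{"match": {"username": username}}],
            "filter": [{
                "range": {
                    "lastmodified_at": {
                        "gte": int(window_start.timestamp() * 1000),
                        "lte": int(window_end.timestamp() * 1000),
                    }
                }
            }],
        }
    }
    response = es.search(index=ind_name, query=query,
                         sort="lastmodified_at:asc", size=10000)
    return [hit["_source"] for hit in response["hits"]["hits"]]
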
def previous_7_days(request):
    try:
        s=[]
        username = request.headers.get('X-Username')
        if username == 'image_logs':
            test = get_messages_by_user_and_session(username, imtext_index)
        else:
            test = get_messages_by_user_and_session(username, es_index_name)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today, yesterday, and seven days ago in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)     
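        # e.g. at 2024-03-15 10:30 UTC the window below keeps items from
        # 2024-03-08 00:00:00 up to (but not including) 2024-03-14 00:00:00,
        # i.e. the six full days before yesterday (yesterday and today are
        # served by the other views)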
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_seven_days_ago <= lastmodified_at_datetime < start_of_yesterday:
                s.append(item)

        if s:
            if username == 'image_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    tot_hist=[]
                    chat_h={}
                    imtex={}
                    for document in result["hits"]["hits"]:
                        source = document["_source"]
                        # A six-field source carries an 'img' payload plus the
                        # assistant's reply about that image
                        if len(source) == 6:
                            chat_h = {'system': source['img']}
                            imtex = {}  # reset so a hit without an 'ai' message does not reuse the previous reply
                            if source["messages"][0]['type'] == 'ai':
                                imtex = {'assistant': source["messages"][0]['data']['content']}
                            elif source["messages"][1]['type'] == 'ai':
                                imtex = {'assistant': source["messages"][1]['data']['content']}
                            chat_h = (chat_h, imtex)
                        # A five-field source is a plain user/assistant exchange
                        elif len(source) == 5:
                            if source['messages'][0]['type'] == 'ai':
                                chat_h = ({'user': source['messages'][1]['data']['content']},
                                          {'assistant': source['messages'][0]['data']['content']})
                            elif len(source['messages']) == 2:
                                chat_h = ({'user': source['messages'][0]['data']['content']},
                                          {'assistant': source['messages'][1]['data']['content']})
                            else:
                                # A lone user message becomes a 1-tuple so the
                                # flattening step below still works
                                chat_h = ({'user': source['messages'][0]['data']['content']},)
                        tot_hist.append(chat_h)

                    # Flatten the per-document tuples into one ordered list of
                    # message dicts (handles 1- and 2-element tuples)
                    flattened_list = []
                    for pair in tot_hist:
                        flattened_list.extend(pair)
                        
                    
                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history': flattened_list,
                        'content': chat_id_names
                    }
                    
                    items.append(yest_dict)
            else:
                session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in s))
    
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        # Each hit stores its chat history as a JSON string;
                        # decode every hit and flatten into one list of message
                        # dicts before rebuilding the LangChain message objects
                        session_hist = messages_from_dict([
                            msg
                            for document in result["hits"]["hits"]
                            for msg in json.loads(document["_source"]["history"])
                        ])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if isinstance(i, HumanMessage):
                                chat_h = {'user': i.content}
                            elif isinstance(i, AIMessage):
                                chat_h = {'assistant': i.content}
                            tot_hist.append(chat_h)
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history': tot_hist,
                            'content': chat_id_names
                        }
                        
                        items.append(yest_dict)
                  

            # Wrap the per-session items in the envelope the frontend expects
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}

        return JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        logger.error(f"Error in previous_7_days view: {e}")
        return JsonResponse({"error": str(e)}, status=500)

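# For reference, each view above returns a doubly nested payload; the outer
# and inner "messages" keys are left as-is, presumably for frontend
# compatibility:
#   {"messages": {"messages": [{"session_id": ...,
#                               "chat_history": [{"user": ...}, {"assistant": ...}],
#                               "content": ...}]}}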