#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 23:30:08 2024

@author: karthikraj
"""

# views.py
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
import json
from datetime import timedelta, datetime, timezone
from time import time, sleep
from langchain.chat_models import ChatOpenAI
import os
import sys
from langchain.load.dump import dumps
from math import floor
from django.http import JsonResponse, HttpResponse
import subprocess
import uuid
from natsort import natsorted
from langchain_core.messages import message_to_dict, messages_from_dict
from openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import ElasticsearchStore
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_core.messages.human import HumanMessage
from langchain_core.messages.ai import AIMessage
import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch, NotFoundError
from collections import OrderedDict
from decouple import config
import tiktoken
import openai
import base64
from django.core.mail import send_mail
from django.urls import reverse

sys.path.append(os.path.dirname(__file__))
import LogUtils
logger = LogUtils.getRootLogger()

@csrf_exempt
def chatbot(request):
    name = request.session.get('name')  # Get 'name' from session
    if name:
        return render(request, "index.html", {'user_name': name})
    else:
        return redirect("login")  # Redirect to login if name is not in session
    
@csrf_exempt
def login_view(request):
    if request.method == "POST":
        # Handle the login form submission
        username = request.POST["username"]
        password = request.POST["password"]
        logger.info("login attempt for username %s" % username)

        try:
            # Query Elasticsearch to find the user by username
            response = es.search(
                index='allibotv5_user_details',
                body={
                    "query": {
                        "bool": {
                            "must": [
                                {"match": {"username": username}},
                                # Ensure passwords are hashed in your index
                            ]
                        }
                    }
                }
            )

            # Check if any user matched the query
            if response['hits']['total']['value'] > 0:
                for hit in response['hits']['hits']:
                    source = hit['_source']
                    # NOTE: plain-text comparison; see the hashed-password
                    # sketch after this view for the safer alternative
                    if source.get('password') == password:
                        request.session['name'] = username
                        logger.info(f"User {username} authenticated successfully. Redirecting to chatbot.")
                        return redirect(chatbot)  # Redirect to chatbot view
                # No matching password among the hits
                return render(request, "login.html", {"error": "Invalid credentials"})
            else:
                # If authentication fails, re-render the login page with an error
                return render(request, "login.html", {"error": "Invalid credentials"})

        except Exception as e:
            logger.exception('Login failed due to %s' % e)
            return render(request, "login.html", {"error": "An error occurred. Please try again."})

    # If the request is GET, render the login page
    return render(request, "login.html")
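
# A minimal sketch of hashed-password verification with Django's built-in
# hashers, assuming user documents stored make_password() output rather than
# plain text. Illustrative only; login_view above still compares plain text.
def verify_password_sketch(raw_password, stored_hash):
    from django.contrib.auth.hashers import check_password
    # check_password picks the hasher from the hash prefix and compares in
    # constant time, e.g. check_password('secret', make_password('secret'))
    return check_password(raw_password, stored_hash)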

@csrf_exempt
def forgot_password_view(request):
    if request.method == "POST":
        username = request.POST.get("username")
        
        try:
            # Search for the user in Elasticsearch
            response = es.search(
                index='allibotv5_user_details',
                body={
                    "query": {
                        "match": {"username": username}
                    }
                }
            )

            if response['hits']['total']['value'] > 0:
                user_data = response['hits']['hits'][0]['_source']
                user_email = user_data.get("mail")  # Attempt to retrieve the email

                if user_email:  # Check if an email is present
                    # Build the reset link. NOTE: the link carries no token,
                    # so it does not identify the user or expire; see the
                    # signed-token sketch after this view.
                    message = "Click the link below to reset your password:\n\n"
                    message += request.build_absolute_uri("/reset-password/")

                    send_mail(
                        subject="Password Reset Request",
                        message=message,
                        from_email="tna@scic.com",
                        recipient_list=[user_email],
                        fail_silently=False,
                    )

                    return render(request, "forgot_password.html", {"message": "Password reset link sent to your email."})
                else:
                    # Handle a user record with no email on file
                    return render(request, "forgot_password.html", {"error": "Email not found for this user."})
            else:
                return render(request, "forgot_password.html", {"error": "User not found."})

        except Exception as e:
            logger.exception('Password reset request failed due to %s' % e)
            return render(request, "forgot_password.html", {"error": "An error occurred. Please try again."})

    return render(request, "forgot_password.html")
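
# A hedged sketch of an expiring reset token using django.core.signing,
# assuming the reset URL would carry this token as a query parameter (the
# view above sends a bare /reset-password/ link with no token).
def make_reset_token_sketch(username):
    from django.core import signing
    # signing.loads(token, max_age=3600) in the reset view would then reject
    # tokens older than an hour and recover the username tamper-proof.
    return signing.dumps({"username": username})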

@csrf_exempt
def reset_password_view(request):
    if request.method == "POST":
        username = request.POST.get("username")
        new_password = request.POST.get("new_password")

        try:
            # Find the user by username
            response = es.search(
                index='allibotv5_user_details',
                body={
                    "query": {
                        "match": {"username": username}
                    }
                }
            )

            if response['hits']['total']['value'] > 0:
                user_id = response['hits']['hits'][0]['_id']

                # Update the user's password
                es.update(
                    index='allibotv5_user_details',
                    id=user_id,
                    body={
                        "doc": {
                            "password": new_password  # Be sure to hash this in a real application
                        }
                    }
                )
                return render(request, "login.html", {"message": "Password has been reset successfully."})
            else:
                return render(request, "reset_password.html", {"error": "User not found."})

        except Exception as e:
            logger.exception('Password reset failed due to %s' % e)
            return render(request, "reset_password.html", {"error": "An error occurred. Please try again."})

    return render(request, "reset_password.html")


@csrf_exempt
def signout(request):
    request.session.pop('name', None)  # Avoid KeyError if the session key is missing
    login_url = reverse('login')  # Ensure the 'login' view is correctly mapped in your URLs
    return JsonResponse({
        'message': 'Signout successful', 
        'redirect_url': login_url
    })
# https://pro.scic.com/Account/Login/LogOff?returnUrl=%2FSignin%2F&

os.environ["OPENAI_API_KEY"] = config('SECRET_KEY')
embeddings = OpenAIEmbeddings()
es_index_name = config('es_index_name')
chatname_index = config('chatname_index')
# The Elasticsearch URL (including credentials) is read from the environment;
# ES_URL is an assumed .env key replacing the URL previously hard-coded here.
es = Elasticsearch(config('ES_URL'), request_timeout=300, retry_on_timeout=True)

def check_elastic_status():
    # ES_USER / ES_PASSWORD / ES_HOST are assumed .env keys replacing the
    # credentials previously hard-coded in source
    basic = HTTPBasicAuth(config('ES_USER'), config('ES_PASSWORD'))
    response = requests.get(config('ES_HOST'), auth=basic)
    return response.status_code == 200

ES_VECTOR_INDICES = [
    'docx_new_page', 'pdf_new_page', 'additional_commercial_forms',
    'docx_pdf_page', 'botcoach_index', 'html_unstructured1',
    'excel_json_ind', 'pdf_json_ind', 'iso_forms_pdf_full_pdf',
    'iso_forms_pdf_pagewise', 'json_others', 'allibot_faq',
]

try:
    db = ElasticsearchStore(
        es_connection=es,
        index_name=ES_VECTOR_INDICES,
        embedding=embeddings,
        strategy=ElasticsearchStore.ExactRetrievalStrategy()
    )
except Exception as ex:
    logger.exception('Exception occurred due to %s' % ex)
    # Retry once if the cluster is reachable; otherwise db stays undefined
    # and the chat view will fail at request time
    if check_elastic_status():
        db = ElasticsearchStore(
            es_connection=es,
            index_name=ES_VECTOR_INDICES,
            embedding=embeddings,
            strategy=ElasticsearchStore.ExactRetrievalStrategy()
        )

def search_by_session_id(index_name, session_id):
    # Define the query
    query = {
        "size": 1000,
        "query": {
            "match": {
                "session_id": session_id
            }
        },
        "sort": [
            {
                "lastmodified_at": {
                    "order": "asc"
                }
            }
        ]
    }

    # Perform the search
    response = es.search(index=index_name, body=query)

    return response['hits']['hits']

def update_lastmodified_at(index_name, doc_id):
    # Current epoch timestamp in milliseconds, matching the ingestion helpers
    current_timestamp = int(time() * 1000)
    
    # Define the update body
    update_body = {
        "doc": {
            "lastmodified_at": current_timestamp  # Update with timestamp in milliseconds
        }
    }

    # Perform the update
    es.update(index=index_name, id=doc_id, body=update_body)


def add_user_message_to_es(customer,alliance_number,appname, session_id, user_prompt,index,user_token,modelname,is_safe, flagged_categories):
    # Create a document with the required fields
    doc = {
        'username':customer,
        'alliance_number':alliance_number,
        'modelname':modelname,
        'appname': appname,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(HumanMessage(content=user_prompt)),
            ensure_ascii=True,
        ),
        "human token" : user_token,
        "is_safe":is_safe,
        "flagged_categories":flagged_categories,
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=index, body=doc)
    return res

def add_assistant_message_to_es(customer,alliance_number,appname, session_id, answer,assistant_token,modelname):
    # Create a document with the required fields
    doc = {
        'username':customer,
        'alliance_number':alliance_number,
        'modelname':modelname,
        'appname': appname,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(AIMessage(content=answer)),
            ensure_ascii=True,
        ),
        "assistant token" : assistant_token,
        "feedback" : "null",
        
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=es_index_name, body=doc)
    return res
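
# The "history" field stores a LangChain message dict as JSON; the session
# rebuild in allibot() below depends on this round-trip. A minimal
# illustration for a single stored document:
def history_roundtrip_sketch(stored_history_json):
    msg_dict = json.loads(stored_history_json)
    # messages_from_dict accepts a sequence of dicts and returns messages
    return messages_from_dict([msg_dict])[0]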

def update_create_at_with_sessionid(index_name,session_id):
    documents = search_by_session_id(index_name, session_id)
    # Update 'lastmodified_at' field for each matched document
    for doc in documents:
        doc_id = doc['_id']  # Get the document ID
        update_lastmodified_at(index_name, doc_id)



def get_chat_id_names_by_session_id(index_name, session_id):
    query = {
        "size": 5000,
        "query": {"term": {"session_id.keyword": session_id}}  # Match the exact session ID
    }

    
    response = es.search(index=index_name, body=query)
    
    chat_id_name=''
    for hit in response['hits']['hits']:
        source = hit['_source']
        if 'chat_id_name' in source:
            chat_id_name = source.get('chat_id_name')
       
    return chat_id_name

def Bot_coach_ques(user_prompt):
    search_query = {
        "size": 10000,
        "query": {
            "match_phrase": {
                "text": user_prompt
            }
        }
    }

    es_index_name = 'botcoach_index'
    ids = []  # Ensure a defined return value even if the index is missing
    # Execute the search
    if es.indices.exists(index=es_index_name):
        results = es.search(index=es_index_name, body=search_query)
        for hit in results['hits']['hits']:
            ids.append(hit['_id'])
    return ids

def unique_sources_with_pages(meta_data):
    unique_sources = set()
    
    for item in meta_data:
        source = item.get('source', '') 
        # print('main',source)
        filename = os.path.basename(source)  # Get the file name with extension
        name_without_ext = os.path.splitext(filename)[0]  
        # print('name_without_ext',name_without_ext)
        unique_sources.add(name_without_ext)
    
    # Convert the set of unique sources to a comma-separated string
    unique_sources_str = ', '.join(unique_sources)
    
    return unique_sources_str 

def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model)
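    # e.g. 'gpt-4-turbo' resolves to the cl100k_base encoding (an assumption
    # based on tiktoken's model-prefix table; token counts vary by encoding)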
    num_tokens = len(encoding.encode(string))
    return num_tokens

def org_token(input_tokens, output_tokens, org_name):
    # Search for the document with the given organization name
    org_toks = es.search(
        index="organization_level_alli_v2_token_count",
        body={
            'query': {
                'match': {
                    'organization_name': org_name
                }
            }
        }
    )['hits']['hits']
    if org_toks:
        # Only the last matching document is updated (the loop keeps the
        # final doc_id/doc_source pair)
        for doc in org_toks:
            doc_id = doc['_id']
            doc_source = doc['_source']
        # Retrieve existing input/output tokens with default values
        inp = doc_source.get('input_overall_tokens', 0)
        outp = doc_source.get('output_overall_tokens', 0)

        input_overall_tokens = input_tokens + inp
        output_overall_tokens = output_tokens + outp
        overall_tokens = input_overall_tokens + output_overall_tokens
        # TODO: tokens_usable is hard-coded; it should come from configuration
        tokens_usable = 100000
        percentage = floor((overall_tokens / tokens_usable) * 100)

        # Document to be updated. NOTE: 'percenatge' keeps the existing
        # index field spelling so stored data stays readable.
        update_doc = {
            'doc': {
                'output_overall_tokens': output_overall_tokens,
                'input_overall_tokens': input_overall_tokens,
                'overall_tokens': overall_tokens,
                'organization_name': org_name,
                'percenatge': percentage,
            }
        }

        # Update the document using its ID
        res = es.update(index="organization_level_alli_v2_token_count", id=doc_id, body=update_doc)
        logger.info(f"Updated document ID: {doc_id}, Response: {res}")

def user_org_tokens(org_name,candidatename, username, input_tokens, output_tokens):
    # Search for documents where organization_name matches
    user_tokens = es.search(
        index='risk_insurance_education_alli_v2',
        body={
            'query': {
                'bool': {
                    'must': [
                        {'match': {'organization_name': org_name}}
                    ],
                    'should': [
                        {'match': {'username': username}},
                        {'bool': {'must_not': {'exists': {'field': 'username'}}}}
                    ]
                }
            }
        }
    )['hits']['hits']

    # Check if any document was found for the organization
    if user_tokens:
        username_found = False
        for doc in user_tokens:
            doc_id = doc['_id']
            doc_source = doc['_source']

            # Check if username matches the current document
            if doc_source.get('username') == username:
                username_found = True

                # Retrieve existing input/output tokens with default values
                inp = doc_source.get('input_tokens', 0)
                outp = doc_source.get('output_tokens', 0)

                # Update the token values
                input_overall_tokens = input_tokens + inp
                output_overall_tokens = output_tokens + outp    
                overall_tokens = input_overall_tokens + output_overall_tokens

                # Prepare the updated document
                updated_doc = {
                    'doc': {
                        'participant_name':candidatename,
                        'username': username,
                        'input_tokens': input_overall_tokens,
                        'output_tokens': output_overall_tokens,
                        'overall_tokens': overall_tokens,
                        'organization_name': org_name,
                    }
                }

                # Update the document in Elasticsearch
                es.update(index="risk_insurance_education_alli_v2", id=doc_id, body=updated_doc)
                print(f"Document updated for username: {username}")
                break

        # If no document with the username is found but organization exists, create a new document
        if not username_found:
            new_doc = {
                'participant_name':candidatename,
                'username': username,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'overall_tokens': input_tokens + output_tokens,
                'organization_name': org_name
            }
            es.index(index="risk_insurance_education_alli_v2", body=new_doc)
            print(f"New document created for username: {username}")

    else:
        logger.info(f"No document found for organization: {org_name}")

def moderate_input(text):
    """
    Check user input using the OpenAI Moderation API.
    Returns a tuple (is_safe, flagged_categories).
    """
    try:
        client = OpenAI()

        response = client.moderations.create(
            model="omni-moderation-latest",
            input=text,
        )
        result = response.to_dict()["results"][0]
        flagged = result["flagged"]
        categories = result["categories"]
        return not flagged, categories if flagged else {}
    except Exception as e:
        logger.exception(f"Moderation API failed: {e}")
        return True, {}  # Assume safe if the API call fails
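
# Example return shapes from moderate_input (categories is the API's
# per-category boolean map when the input is flagged):
#   moderate_input("hello")       -> (True, {})
#   moderate_input(flagged_text)  -> (False, {"harassment": True, ...})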


@csrf_exempt
def allibot(request):
    try:
        if request.method == 'POST':
            action_type = request.POST.get('operational_type')
            # TODO: alliance_number is stubbed; it should come from the session
            alliance_number = 'CA-12345'
            username = request.session.get('name')

            if action_type == 'action1':
                appname = request.POST.get('appname')
                session_id = request.POST.get('session_id')
                org_name = request.POST.get('organization_name')
                user_prompt = request.POST.get('message')

                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4_0125_preview = num_tokens_from_string(user_prompt, 'gpt-4-turbo')
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_user_message_to_es(username, alliance_number, appname, session_id, user_prompt, es_index_name, input_token_cost_gpt4_0125_preview, 'gpt-4-turbo', is_safe, flagged_categories)

                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you. If there is any MCQ question explain why the choice is correct.
                    If you do not know the answer to the question, simply respond with "That's a great question! I'm not sure of the answer right now.  Can you ask your question a different way? I am excited to assist you further!".
                    Response must be in and around 200 words. It must be written in paragraphs and must not exceed 4 paragraphs.
                    Give a line of space between the paragraphs.
                    If questions are asked where there is no relevant context available, simply respond with "That's a great question! I'm not sure of the answer right now.  Can you ask your question a different way? I am excited to assist you further!"
                    Context: {context}


                    {chat_history}
                    Human: {question}
                    Assistant:"""

                    prompt = PromptTemplate(input_variables=["context", "chat_history", "question"], template=template)

                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')

                    sleep(1)

                    # Rebuild this session's conversation history from ES
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": session_id}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        items = []

                    session_chat_hist = messages_from_dict(items)
                    for message in session_chat_hist:
                        if isinstance(message, HumanMessage):
                            memory.chat_memory.add_user_message(message.content)
                        elif isinstance(message, AIMessage):
                            memory.chat_memory.add_ai_message(message.content)

                    qa = ConversationalRetrievalChain.from_llm(
                        llm=ChatOpenAI(model="gpt-4-turbo"), retriever=db.as_retriever(search_kwargs={"k": 4}), memory=memory,
                        combine_docs_chain_kwargs={'prompt': prompt}, return_source_documents=True)

                    formatted_response = qa(user_prompt)
                    answer = formatted_response['answer']
                    output_token_cost_gpt4_0125_preview = num_tokens_from_string(answer, 'gpt-4-turbo')
                    add_assistant_message_to_es(username, alliance_number, appname, session_id, answer, output_token_cost_gpt4_0125_preview, 'gpt-4-turbo')
                    org_token(input_token_cost_gpt4_0125_preview, output_token_cost_gpt4_0125_preview, org_name)
                    user_org_tokens(org_name, username, alliance_number, input_token_cost_gpt4_0125_preview, output_token_cost_gpt4_0125_preview)

                    source = formatted_response['source_documents']
                    meta_data = []
                    for sor in source:
                        meta_data.append(sor.metadata)
                    unique_entries_mod_str = unique_sources_with_pages(meta_data)
                    try:
                        # Fire-and-forget the relevancy scorer; passing the
                        # arguments as a list avoids shell quoting/injection
                        json_str = dumps(''.join(doc.page_content for doc in formatted_response['source_documents']))
                        user_ques = dumps(user_prompt)
                        answer_prompt = dumps(answer)
                        unique_entries_str = dumps(unique_entries_mod_str)
                        subprocess.Popen([
                            "python3", "/var/www/html/AlliBot_CAA/AlliBot_R/Quesgen_answer_relevancy.py",
                            user_ques, answer_prompt, json_str, unique_entries_str,
                        ])
                    except Exception as e:
                        logger.exception('Exception occurred due to %s' % e)

                    return JsonResponse({'response': answer, 'session_id': session_id})
                else:
                    # Moderation flagged the prompt; refuse to answer it
                    return JsonResponse({
                        'response': "The question you asked violates AlliBot usage policies. Repeated violations may result in your AlliBot account being blocked.",
                        'session_id': session_id,
                        'flagged_categories': flagged_categories
                    })

        return JsonResponse({'error': 'Invalid request'}, status=400)

    except Exception as e:
        logger.exception('Exception occurred due to %s' % e)
        return JsonResponse({'error': str(e)})

    


@csrf_exempt
def update_thumbs_flag(request):
    # Search for the document using session ID and answer field
    try:
        if request.method == 'POST':
            session_id = request.POST.get('session_id')
            index_name = request.POST.get('indexname')
            answer = request.POST.get('answer')
            thumbs_flag = request.POST.get('thumbs_flag')
            sleep(2)

            response = es.search(
                index=index_name,
                body={
                    "size": 5000,
                    "query": {"term": {"session_id.keyword": session_id}}  # Match the exact session ID
                }
            )

            # Check if any documents are found
            total_docs = response['hits']['total']['value']
            if total_docs > 0:
                for hit in response['hits']['hits']:
                    doc_id = hit['_id']  # Get document ID
                    if 'answer' in hit['_source']:
                        if hit['_source']['answer'] == answer:
                            es.update(
                                index=index_name,
                                id=doc_id,
                                body={
                                    "doc": {
                                        "feedback": thumbs_flag,  # 'good', 'bad', or 'no feedback'
                                    },
                                    "doc_as_upsert": True  # Create the document if it doesn't exist
                                }
                            )
                return JsonResponse({'status': 'success', 'message': 'Document updated'})

            else:
                return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)

        return JsonResponse({'error': 'Invalid request method'}, status=405)

    except NotFoundError:
        logger.error(f"No documents found for session_id {session_id} and answer '{answer}'.")
        return JsonResponse({'status': 'failure', 'message': 'Index or document not found'}, status=404)

@csrf_exempt
def text_to_speech(request):
    if request.method == 'POST':
        input_text = request.POST.get('message')
        if not input_text:
            return JsonResponse({'error': 'No input text provided'}, status=400)

        try:
            # Fetch TTS audio from OpenAI
            response = openai.audio.speech.create(
                model="tts-1",
                voice="alloy",
                input=input_text
            )
            # Stream the audio content to memory
            audio_content = response.content
            # Convert the audio content to base64
            base64_encoded_audio = base64.b64encode(audio_content).decode('utf-8')

            return JsonResponse({'audio': base64_encoded_audio})
        except Exception as e:
            return JsonResponse({'error': str(e)}, status=500)
    return JsonResponse({'error': 'Invalid request method'}, status=405)


def reencode_audio(input_file_path, output_file_path):
    """
    Re-encodes the input audio file to mono and 16kHz, saving it as a WAV file.
    
    Parameters:
    input_file_path (str): Path to the input audio file.
    output_file_path (str): Path to save the re-encoded output file.
    
    Returns:
    str: Path to the re-encoded output file.
    """
    # Use ffmpeg to re-encode the audio file
    ffmpeg_command = [
        'ffmpeg', '-y',  # '-y' overwrites the output file if it exists
        '-i', input_file_path,  # Input file
        '-ac', '1',  # Convert to mono audio
        '-ar', '16000',  # Set the sampling rate to 16 kHz
        output_file_path  # Output file
    ]
    
    # Execute the ffmpeg command
    subprocess.run(ffmpeg_command, check=True)
    
    return output_file_path

@csrf_exempt
def speech_to_text(request):
    if request.method == 'POST':
        if 'file' in request.FILES:
            audio_file = request.FILES['file']
            # logger.info('filename%s'%audio_file.name)
            # logger.info('MIME type:%s'%audio_file.content_type)

            try:
                audio_bytes = audio_file.read()

                # Generate a unique filename using UUID for uniqueness
                unique_filename = f"{uuid.uuid4()}_{audio_file.name}"

                # Save the received audio file locally before re-encoding
                uploads_dir = os.path.join(os.path.dirname(__file__), 'files_audio')
                uploads_dir_encoded = os.path.join(os.path.dirname(__file__), 'files_encoded_audio')

                os.makedirs(uploads_dir, exist_ok=True)  # Create the directories if they don't exist
                os.makedirs(uploads_dir_encoded, exist_ok=True)

                file_path = os.path.join(uploads_dir, unique_filename)
                encoded_path = os.path.join(uploads_dir_encoded, unique_filename)

                with open(file_path, 'wb') as f:
                    f.write(audio_bytes)  # Write the bytes to a local file

                logger.info(f'File saved to {file_path}')
                reencode_audio(file_path, encoded_path)

                # Process the audio with OpenAI's Whisper model; the context
                # manager ensures the file handle is closed before removal
                client = OpenAI()
                with open(encoded_path, "rb") as encoded_file:
                    transcription = client.audio.transcriptions.create(
                        model="whisper-1", file=encoded_file, response_format="text"
                    )
                os.remove(file_path)
                os.remove(encoded_path)

                logger.info('Transcription: %s' % transcription)

                return JsonResponse({'transcription': transcription})
            except Exception as e:
                logger.error(f'Error processing file: {e}')
                return JsonResponse({'error': str(e)}, status=500)
        else:
            return JsonResponse({'error': 'No file provided'}, status=400)
    else:
        return JsonResponse({'error': 'Invalid request method'}, status=405)


def get_messages_by_user_and_session(username):
    # print('get username',username)
    query = {
       "query": {
           "bool": {
               "must": [
                   {"match": {"username": username}},
               ]
           }
       },
       "size": 10000,
       "sort": [
           {"lastmodified_at": {"order": "asc"}}
       ]
    }

    # Execute the search query
    response = es.search(index=es_index_name, body=query)

    # Extract and return the messages
    messages = [hit['_source'] for hit in response['hits']['hits']]
    return messages    
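
# A small sketch of the slice-based pagination used by the *_messages views
# below, assuming 1-based page numbers and a fixed page size of 50:
def paginate_sketch(items, page, limit=50):
    offset = (page - 1) * limit
    # e.g. 120 items, page 2, limit 50 -> slice [50:100], has_more = 120 > 100
    return items[offset:offset + limit], len(items) > offset + limit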

@csrf_exempt
def activate_query_conversation(request):
    if request.method == "POST":
        try:
            # Get data from POST request
            organization_name = request.POST.get("organization_name")
            username = request.POST.get("username")

            # Construct the Elasticsearch query
            query = {
                "query": {
                    "bool": {
                        "must": [
                            {"match": {"organization_name": organization_name}},
                            {"match": {"username": username}},
                        ]
                    }
                }
            }

            # Search the index for matching records
            response = es.search(index="risk_insurance_education", body=query)

            # Variable to hold the conversation activation state
            activate_conv = True

            # Process the results
            for hit in response["hits"]["hits"]:
                assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                overall_tokens = hit["_source"].get("overall_tokens", 0)

                # If assigned tokens exist but overall tokens are missing
                if assigned_tokens and not overall_tokens:
                    activate_conv = True

                # If both assigned and overall tokens are present
                elif assigned_tokens and overall_tokens:
                    # Remaining token budget
                    sub_res = assigned_tokens - overall_tokens

                    # Deactivate once 100 or fewer tokens remain
                    # (sub_res <= 100 already covers sub_res < 0)
                    if sub_res <= 100:
                        activate_conv = False
                    else:
                        activate_conv = True

            # Return the response with the activation status
            return JsonResponse({"activate_conv": activate_conv})

        except Exception as e:
            # Log the error and return an error response
            logger.info("check token index %s" % e)
            return JsonResponse({"error": str(e)}, status=500)
    else:
        # Return a method not allowed error for non-POST requests
        return JsonResponse({"error": "Method not allowed"}, status=405)



def today_messages(request):
    try:
        t = []
        username = request.headers.get('X-Username')
        page = int(request.GET.get('page', 1))  # Default to page 1 if not provided
        limit = 50 # Define the limit for each page (number of messages per page)
        offset = (page - 1) * limit  # Calculate offset based on the page number

        # Fetch messages with enough records to account for offset
        test = get_messages_by_user_and_session(username)

        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)

        # Filter messages for today
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_today <= lastmodified_at_datetime <= current_datetime:
                t.append(item)

        t = sorted(t, key=lambda item: item['lastmodified_at'],reverse=True)
        

        # Apply pagination after filtering (slicing clamps automatically)
        paginated_today_messages = t[offset:offset + limit]
        paginated_today_messages = paginated_today_messages[::-1]

        # True only when messages exist beyond the current page
        has_more = len(t) > offset + limit

        if paginated_today_messages:
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in paginated_today_messages))
            items = []
            for id_user in session_ids[::-1]:
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": id_user}},
                    sort="lastmodified_at:asc",
                )
                if result and len(result["hits"]["hits"]) > 0:
                    session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                      for document in result["hits"]["hits"])

                    tot_hist = []
                    for i in session_hist:
                        chat_h = {'user': i.content} if isinstance(i, HumanMessage) else {'assistant': i.content}
                        tot_hist.append(chat_h)

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names
                    })

            formatted_today_dict = {"messages": items, "has_more": has_more}
        else:
            formatted_today_dict = {"messages": [], "has_more": False}
        
        # print('formatted_today_dict', formatted_today_dict)
        return JsonResponse({'messages': formatted_today_dict})

    except Exception as e:
        logger.error(f"Error in today_messages view: {e}")
        return JsonResponse({"error": str(e)}, status=500)

def yesterday_messages(request):
    try:
        y = []
        username = request.headers.get('X-Username')
        page = int(request.GET.get('page', 1))  # Default to page 1 if not provided
        limit = 50  # Define the limit for each page (number of messages per page)
        offset = (page - 1) * limit  # Calculate offset based on the page number

        # Fetch messages with enough records to account for offset
        test = get_messages_by_user_and_session(username)

        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        end_of_yesterday = start_of_today - timedelta(microseconds=1) 
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_yesterday <= lastmodified_at_datetime <= end_of_yesterday:
                y.append(item)
                
        y = sorted(y, key=lambda item: item['lastmodified_at'], reverse=True)

        # Apply pagination after filtering (slicing clamps automatically)
        paginated_yesterday_messages = y[offset:offset + limit]
        paginated_yesterday_messages = paginated_yesterday_messages[::-1]

        # True only when messages exist beyond the current page
        has_more = len(y) > offset + limit

        if paginated_yesterday_messages:
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in paginated_yesterday_messages))
            items = []
            for id_user in session_ids[::-1]:
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": id_user}},
                    sort="lastmodified_at:asc",
                )
                if result and len(result["hits"]["hits"]) > 0:
                    session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                      for document in result["hits"]["hits"])

                    tot_hist = []
                    for i in session_hist:
                        chat_h = {'user': i.content} if isinstance(i, HumanMessage) else {'assistant': i.content}
                        tot_hist.append(chat_h)

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names
                    })

            formatted_yesterday_dict = {"messages": items, "has_more": has_more}
        else:
            formatted_yesterday_dict = {"messages": [], "has_more": False}
        return JsonResponse({'messages': formatted_yesterday_dict})

    except Exception as e:
        logger.error(f"Error in yesterday_messages view: {e}")
        return JsonResponse({"error": str(e)}, status=500)
    
def previous_7_days(request):
    try:
        s = []
        username = request.headers.get('X-Username')
        page = int(request.GET.get('page', 1))  # Default to page 1 if not provided
        # print('page',page)
        limit = 50  # Define the limit for each page (number of messages per page)
        offset = (page - 1) * limit  # Calculate offset based on the page number
        # print('offset',offset)
        # Fetch messages with enough records to account for offset
        test = get_messages_by_user_and_session(username)
        # print('test',test)
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)     
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_seven_days_ago <= lastmodified_at_datetime < start_of_yesterday:
                s.append(item)
        
        s = sorted(s, key=lambda item: item['lastmodified_at'], reverse=True)

        # Apply pagination after filtering (slicing clamps automatically)
        paginated_previous7days_messages = s[offset:offset + limit]
        paginated_previous7days_messages = paginated_previous7days_messages[::-1]

        # True only when messages exist beyond the current page
        has_more = len(s) > offset + limit
        
        if paginated_previous7days_messages:
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in paginated_previous7days_messages))
            items = []
            for id_user in session_ids[::-1]:
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": id_user}},
                    sort="lastmodified_at:asc",
                )
                if result and len(result["hits"]["hits"]) > 0:
                    session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                      for document in result["hits"]["hits"])

                    tot_hist = []
                    for i in session_hist:
                        chat_h = {'user': i.content} if isinstance(i, HumanMessage) else {'assistant': i.content}
                        tot_hist.append(chat_h)

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names
                    })
            
            formatted_previous7days_dict = {"messages": items, "has_more": has_more}
            # print('formatted_previous7days_dict',len(tot_hist))

        else:
            formatted_previous7days_dict = {"messages": [], "has_more": False}
        return JsonResponse({'messages': formatted_previous7days_dict})

    except Exception as e:
        logger.error(f"Error in previous_7_days view: {e}")
        return JsonResponse({"error": str(e)}, status=500)
    
def previous_30_days(request):
    try:
        th = []
        username = request.headers.get('X-Username')
        page = int(request.GET.get('page', 1))  # Default to page 1 if not provided
        limit = 50  # Define the limit for each page (number of messages per page)
        offset = (page - 1) * limit  # Calculate offset based on the page number
        # print('page',page)
        # Fetch messages with enough records to account for offset
        test = get_messages_by_user_and_session(username)

        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today, 7 days ago, and 30 days ago in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)
        start_of_thirty_days_ago = start_of_today - timedelta(days=30)
        
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            # Include messages from 30 days ago up to 7 days ago
            if start_of_thirty_days_ago <= lastmodified_at_datetime < start_of_seven_days_ago:
                th.append(item)

        th = sorted(th, key=lambda item: item['lastmodified_at'], reverse=True)

        # Apply pagination after filtering (slicing clamps automatically)
        paginated_previous30days_messages = th[offset:offset + limit]
        paginated_previous30days_messages = paginated_previous30days_messages[::-1]

        # True only when messages exist beyond the current page
        has_more = len(th) > offset + limit
        
        if paginated_previous30days_messages:
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in paginated_previous30days_messages))
            items = []
            for id_user in session_ids[::-1]:
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": id_user}},
                    sort="lastmodified_at:asc",
                )
                if result and len(result["hits"]["hits"]) > 0:
                    session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                      for document in result["hits"]["hits"])

                    tot_hist = []
                    for i in session_hist:
                        chat_h = {'user': i.content} if isinstance(i, HumanMessage) else {'assistant': i.content}
                        tot_hist.append(chat_h)

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names
                    })

            formatted_previous30days_dict = {"messages": items, "has_more": has_more}
        else:
            formatted_previous30days_dict = {"messages": [], "has_more": False}
        return JsonResponse({'messages': formatted_previous30days_dict})

    except Exception as e:
        logger.error(f"Error in previous_30_days view: {e}")
        return JsonResponse({"error": str(e)}, status=500)
