import pprint

from langchain_core.documents import Document
from langgraph.graph import END, StateGraph

from app.core.rag.nodes.generate import generate_answer
from app.core.rag.nodes.time import time_node
from app.core.rag.state import RAGState
from app.utils import get_logger

logger = get_logger(__name__)

# Minimal single-node graph used to exercise answer generation in isolation.
builder = StateGraph(RAGState)

# Mirror the production graph's wrapping: time_node instruments the node so
# its execution duration is recorded alongside the result.
builder.add_node("generate_answer", time_node(generate_answer))

builder.set_entry_point("generate_answer")
builder.add_edge("generate_answer", END)

rag_app = builder.compile()


def _log_result(result: dict) -> None:
    """Log the node timing and LLM response from one graph invocation.

    Args:
        result: Final state returned by ``rag_app.invoke``; expected to carry
            a ``node_times`` dict (written by ``time_node``) and a
            ``response`` key — TODO confirm against ``time_node``'s contract.
    """
    elapsed = result.get("node_times", {}).get("generate_answer", "N/A")
    # Lazy %-style args so formatting is skipped when INFO is disabled.
    logger.info("Time Taken: %ss", elapsed)
    logger.info("LLM Response:\n%s", result.get("response"))


def main():
    """Run two generation smoke tests against the minimal RAG graph.

    Case 1 invokes the graph with mock retrieved documents and a user
    profile; case 2 invokes it with an empty context to check that the
    model answers honestly when no documents were found.
    """
    # Test Case 1: Standard RAG request
    logger.info("--- Test Case 1: Standard RAG Generation ---")

    mock_docs = [
        Document(
            page_content="Technical University of Munich (TUM) offers a world-class MS in CS. No tuition fees for internationals, but a semester fee of 150 Euro applies.",
            metadata={"source": "germany_guide.pdf"},
        ),
        Document(
            page_content="Requirements for CS in Germany: GPA 2.5 or better, TOEFL 90+, and a relevant Bachelor's degree.",
            metadata={"source": "requirements.pdf"},
        ),
    ]

    state_1 = {
        "user_input": "What are the costs and requirements for a Masters in CS in Germany?",
        "profile": {
            "degree_level": "Masters",
            "target_country": "Germany",
            "subject": "Computer Science",
        },
        "retrieved_docs": mock_docs,
    }

    result_1 = rag_app.invoke(state_1)
    _log_result(result_1)
    print("-" * 30)

    # Test Case 2: Missing Context (Testing the 'Honest' guideline)
    logger.info("--- Test Case 2: Generation with Empty Context ---")

    state_2 = {
        "user_input": "How is the weather in Mars during winter?",
        "profile": {"name": "John"},
        "retrieved_docs": [],  # No documents found
    }

    result_2 = rag_app.invoke(state_2)
    _log_result(result_2)


# Entry point when this smoke-test script is executed directly.
if __name__ == "__main__":
    main()
