# Gemini "Setting Up Gemini Agentic Workflows" -> The Supervisor Architecture
import os
import sqlite3
from typing import Annotated, List, TypedDict

from dotenv import load_dotenv
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
# Agent memory management
from langgraph.checkpoint.sqlite import SqliteSaver

# Pull GEMINI_API_KEY (and any other settings) into the environment from a local .env file.
load_dotenv()

# Setup the LLM (Gemini 3 Flash Preview)
llm = ChatGoogleGenerativeAI(
    model="gemini-3-flash-preview",
    # NOTE(review): os.getenv returns None if the key is unset — the client will
    # then fall back to its own credential discovery or fail at call time.
    google_api_key=os.getenv("GEMINI_API_KEY")
)

class AgentState(TypedDict):
    """Shared graph state: the running message thread plus the supervisor's routing decision."""
    # 'add_messages' ensures we append to history rather than overwriting
    messages: Annotated[List[BaseMessage], add_messages]
    # Set by supervisor_node: "Coder", "Researcher", or "FINISH" (consumed by the router edge).
    next_agent: str

# 2. Define specialized "Worker" Nodes

def coder_agent(state: AgentState):
    """Worker node: act as a senior coder on the current task.

    Returns a partial state update; 'add_messages' on AgentState appends the
    reply to the shared history rather than overwriting it.
    """
    # Send the FULL message history (not just the last message) so the coder
    # keeps context across supervisor round-trips, with its role delivered as a
    # proper system message instead of being stuffed into a user turn.
    msg = llm.invoke(
        [SystemMessage(content="You are a Senior Coder. Complete the requested task.")]
        + state["messages"]
    )
    return {"messages": [msg]}

def researcher_agent(state: AgentState):
    """Worker node: act as a research analyst on the current task.

    Returns a partial state update; 'add_messages' on AgentState appends the
    reply to the shared history rather than overwriting it.
    """
    # Send the FULL message history (not just the last message) so the analyst
    # keeps context across supervisor round-trips, with its role delivered as a
    # proper system message instead of being stuffed into a user turn.
    msg = llm.invoke(
        [SystemMessage(content="You are a Research Analyst. Analyze the requested task.")]
        + state["messages"]
    )
    return {"messages": [msg]}

# 3. Define the Supervisor (The Brain)

from pydantic import BaseModel, Field
from typing import Literal

class RouterResponse(BaseModel):
    """Structured routing decision the supervisor LLM is constrained to emit."""
    # Literal restricts the model's output to exactly these three route labels.
    next_step: Literal["Coder", "Researcher", "FINISH"] = Field(description="The next agent to act or FINISH")

def supervisor_node(state: AgentState):
    """Decide which worker acts next (or FINISH) based on the full thread.

    Writes the decision into state['next_agent'], which the conditional edge
    on the graph reads to pick the next node.
    """
    system_prompt = (
        "You are the Supervisor. Your job is to delegate tasks to the Coder or Researcher. "
        "When the task is complete, return FINISH."
    )

    # Force Gemini to provide a structured routing decision
    structured_llm = llm.with_structured_output(RouterResponse)
    # Fix: deliver the instructions as a SystemMessage (system role) rather
    # than a HumanMessage, so they are not mistaken for user conversation.
    decision = structured_llm.invoke([SystemMessage(content=system_prompt)] + state["messages"])

    return {"next_agent": decision.next_step}

# 4. Construct the Graph

workflow = StateGraph(AgentState)

# Register the supervisor and both worker nodes.
workflow.add_node("Supervisor", supervisor_node)
workflow.add_node("Coder", coder_agent)
workflow.add_node("Researcher", researcher_agent)

# Every worker reports back to the Supervisor when it finishes.
for _worker in ("Coder", "Researcher"):
    workflow.add_edge(_worker, "Supervisor")

# Execution always begins at the Supervisor, which picks the first hop.
workflow.set_entry_point("Supervisor")


def _route(state: AgentState) -> str:
    """Read the supervisor's routing decision out of the shared state."""
    return state["next_agent"]


# Map the supervisor's decision onto the matching node, or end the run.
workflow.add_conditional_edges(
    "Supervisor",
    _route,
    {"Coder": "Coder", "Researcher": "Researcher", "FINISH": END},
)

# Persist agent memory in a local SQLite file so threads survive restarts.
# check_same_thread=False permits the connection to be used from threads other
# than the one that created it (sqlite3 default forbids this); presumably the
# checkpointer coordinates access — confirm against SqliteSaver docs.
conn = sqlite3.connect("agent_memory.db", check_same_thread=False)
memory = SqliteSaver(conn)

# Compile the graph with the checkpointer so state is saved per thread_id.
# (Duplicate sqlite3/SqliteSaver imports removed — both are imported at the
# top of the file — and the dead commented-out compile() call dropped.)
app = workflow.compile(checkpointer=memory)

# A thread_id scopes the checkpointed history: each distinct id is an
# independent conversation, so you can juggle several threads at once.
config = {"configurable": {"thread_id": "senior-dev-project-001"}}

# Stream the agent's steps. Re-running later with the SAME thread_id restores
# the earlier messages from the SQLite checkpoint, so Gemini remembers them.
user_query = HumanMessage(content="What did we decide on for the coding task?")
stream = app.stream({"messages": [user_query]}, config)
for step in stream:
    for output in step.values():
        print(f"Agent Task: {output}")