From 9ade246a18ff415589c4e00ae83f6a0a146dd18e Mon Sep 17 00:00:00 2001
From: HomunMage
Date: Thu, 20 Jun 2024 13:04:02 +0800
Subject: [PATCH] clean read me

---
 01 LangChain Hello World/README.md    |  25 +--
 02 LangGraph Hello World/README.md    |  39 +----
 03 Graph Chain Hello World/README.md  |  35 ++++
 .../main.py                           |   0
 03 Graph Chain Hello World1/README.md | 154 ------------------
 04 State Graph/README.md              |  27 +--
 05 Agents with Tool/README.md         |  88 ++++++++++
 .../trpg_agent.py                     |   0
 05 Agents with Tools/README.md        |   1 -
 9 files changed, 131 insertions(+), 238 deletions(-)
 create mode 100644 03 Graph Chain Hello World/README.md
 rename {03 Graph Chain Hello World1 => 03 Graph Chain Hello World}/main.py (100%)
 delete mode 100644 03 Graph Chain Hello World1/README.md
 create mode 100644 05 Agents with Tool/README.md
 rename 05 Agents with Tools/main.py => 05 Agents with Tool/trpg_agent.py (100%)
 delete mode 100644 05 Agents with Tools/README.md

diff --git a/01 LangChain Hello World/README.md b/01 LangChain Hello World/README.md
index 6550846..d843b4e 100644
--- a/01 LangChain Hello World/README.md
+++ b/01 LangChain Hello World/README.md
@@ -31,17 +31,7 @@ Follow these steps to set up and run the `ollama` server:
    python main.py
    ```
 
-## Script Explanation
-
-### Importing Libraries
-
-The script starts by importing the necessary libraries from the `langchain` ecosystem:
-
-```python
-from langchain_community.chat_models import ChatOllama
-from langchain_core.prompts import PromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-```
+## Core Explanation
 
 ### Specifying the Local Language Model
 
@@ -86,14 +76,6 @@ llm_chain = prompt | llm | StrOutputParser()
 generation = llm_chain.invoke(formatted_prompt)
 ```
 
-### Printing the Output
-
-Print the generated output:
-
-```python
-print(generation)
-```
-
 ## Running the Script
 
 1. Ensure the `ollama` server is running as described in the [Setting Up the Ollama Server](#setting-up-the-ollama-server) section.
@@ -101,8 +83,3 @@ print(generation)
    ```sh
    python main.py
    ```
-
-## Notes
-
-- Ensure all dependencies are installed and the `ollama` server is running before executing the script.
-- Adjust the prompt template and input question as needed to suit your use case.
diff --git a/02 LangGraph Hello World/README.md b/02 LangGraph Hello World/README.md
index 09683d0..bc5825f 100644
--- a/02 LangGraph Hello World/README.md
+++ b/02 LangGraph Hello World/README.md
@@ -10,7 +10,6 @@ This tutorial is based on [Learn LangGraph - The Easy Way](https://www.youtube.com/watch?v=R8KB-Zcynxc).
 
 - Python 3.11
 - `langgraph` library
-- `ollama` server (if needed)
 
 ### Installing Dependencies
 
@@ -19,33 +18,11 @@ Install the necessary Python package:
 pip install langgraph
 ```
 
-### Setting Up the Ollama Server
-
-If your project requires the `ollama` server, follow these steps to set up and run it:
-
-1. Open a terminal and start the `ollama` server:
-   ```sh
-   ollama serve
-   ```
-
-2. Open another terminal and run the Python script:
-   ```sh
-   python main.py
-   ```
-
-## Script Explanation
-
-### Importing Libraries
-
-The script starts by importing the necessary library:
-
-```python
-from langgraph.graph import Graph
-```
+## Core Explanation
 
 ### Defining Functions
 
-Define two simple functions to be used in the workflow:
+Define two simple node functions to be used in the workflow:
 
 ```python
 def function_1(input_1):
@@ -98,15 +75,7 @@ print(output)
 
 ## Running the Script
 
-1. Ensure all dependencies are installed as described in the [Installing Dependencies](#installing-dependencies) section.
-2. If required, ensure the `ollama` server is running as described in the [Setting Up the Ollama Server](#setting-up-the-ollama-server) section.
-3. Run the Python script:
+Run the Python script:
    ```sh
    python main.py
-   ```
-
-## Notes
-
-- Ensure all dependencies are installed and the `ollama` server is running before executing the script (if needed).
-- Adjust the functions and workflow as needed to suit your use case.
-
+   ```
\ No newline at end of file
diff --git a/03 Graph Chain Hello World/README.md b/03 Graph Chain Hello World/README.md
new file mode 100644
index 0000000..252c4ce
--- /dev/null
+++ b/03 Graph Chain Hello World/README.md
@@ -0,0 +1,35 @@
+# 03 Graph Chain Hello World
+
+This project demonstrates the use of the `langgraph` and `langchain-community` libraries to create a workflow graph that interacts with a local language model to generate and process content.
+
+This tutorial is based on [Learn LangGraph - The Easy Way](https://www.youtube.com/watch?v=R8KB-Zcynxc).
+
+## Core Explanation
+
+### Defining Functions
+
+Because the agent node replies in JSON, the tool node can parse the reply and write the file it describes:
+
+```python
+def Tool(input):
+    print("Tool Stage input:" + input)
+    # Parse the JSON input
+    data = json.loads(input)
+    # Extract the "content" and "filename" parts
+    content = data.get("content", "")
+    filename = data.get("filename", "output.md")
+    # Write the content to the specified filename
+    with open(filename, 'w') as file:
+        file.write(content)
+    return input
+```
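+
+The upstream `Agent` node prompts a local model (`phi3` through `ChatOllama` in JSON mode) to emit exactly that `filename`/`content` JSON, and `main.py` wires the two nodes into a graph. A condensed sketch of that wiring, mirroring `main.py`:
+
+```python
+from langgraph.graph import Graph
+
+workflow = Graph()
+workflow.add_node("agent", Agent)  # the LLM call that returns the JSON string
+workflow.add_node("tool", Tool)
+workflow.add_edge('agent', 'tool')
+workflow.set_entry_point("agent")
+workflow.set_finish_point("tool")
+
+app = workflow.compile()
+app.invoke("write an article, content is startup.md ")
+```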
+
+## Running the Script
+
+1. Install the dependencies: `pip install langgraph langchain-community langchain-core`
+2. If required, start the `ollama` server in another terminal: `ollama serve`
+3. Run the Python script:
+   ```sh
+   python main.py
+   ```
\ No newline at end of file
diff --git a/03 Graph Chain Hello World1/main.py b/03 Graph Chain Hello World/main.py
similarity index 100%
rename from 03 Graph Chain Hello World1/main.py
rename to 03 Graph Chain Hello World/main.py
diff --git a/03 Graph Chain Hello World1/README.md b/03 Graph Chain Hello World1/README.md
deleted file mode 100644
index 32542b3..0000000
--- a/03 Graph Chain Hello World1/README.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# 03 Graph Chain Hello World
-
-This project demonstrates the use of the `langgraph` and `langchain-community` libraries to create a workflow graph that interacts with a local language model to generate and process content.
-
-This tutorials is refer to [Learn LangGraph - The Easy Way](https://www.youtube.com/watch?v=R8KB-Zcynxc).
-
-
-## Environment Setup
-
-### Prerequisites
-
-- Python 3.11
-- `langgraph`, `langchain-community`, and `langchain-core` libraries
-
-### Installing Dependencies
-
-Install the necessary Python packages:
-```sh
-pip install langgraph langchain-community langchain-core httpx
-```
-
-### Setting Up the Ollama Server
-
-If your project requires the `ollama` server, follow these steps to set up and run it:
-
-1. Open a terminal and start the `ollama` server:
-   ```sh
-   ollama serve
-   ```
-
-2. Open another terminal and run the Python script:
-   ```sh
-   python main.py
-   ```
-
-## Script Explanation
-
-### Importing Libraries
-
-The script starts by importing the necessary libraries:
-
-```python
-from langgraph.graph import Graph
-from langchain_community.chat_models import ChatOllama
-from langchain_core.prompts import PromptTemplate
-from langchain_core.output_parsers import StrOutputParser
-import json
-```
-
-### Specifying the Local Language Model
-
-Specify the local language model to be used (`phi3` in this case):
-
-```python
-local_llm = "phi3"
-```
-
-### Initializing the ChatOllama Model
-
-Initialize the `ChatOllama` model with desired parameters:
-
-```python
-llm = ChatOllama(model=local_llm, format="json", temperature=0)
-```
-
-### Defining Functions
-
-Define the `Agent` and `Tool` functions to be used in the workflow:
-
-```python
-def Agent(question):
-    # Define the prompt template
-    template = """
-    Question: {question} Let's think step by step.
-    your output format is filename:"" and content:""
-    make sure your output is right json
-    """
-
-    prompt = PromptTemplate.from_template(template)
-
-    # Format the prompt with the input variable
-    formatted_prompt = prompt.format(question=question)
-
-    llm_chain = prompt | llm | StrOutputParser()
-    generation = llm_chain.invoke(formatted_prompt)
-
-    return generation
-
-def Tool(input):
-    print("Tool Stage input:" + input)
-    # Parse the JSON input
-    data = json.loads(input)
-    # Extract the "content" and "filename" parts
-    content = data.get("content", "")
-    filename = data.get("filename", "output.md")
-    # Write the content to the specified filename
-    with open(filename, 'w') as file:
-        file.write(content)
-    return input
-```
-
-### Creating the Workflow Graph
-
-Create a `Graph` instance and add the defined functions as nodes:
-
-```python
-# Define a Langchain graph
-workflow = Graph()
-
-workflow.add_node("agent", Agent)
-workflow.add_node("tool", Tool)
-```
-
-### Connecting the Nodes
-
-Connect the nodes to define the workflow:
-
-```python
-workflow.add_edge('agent', 'tool')
-```
-
-### Setting Entry and Finish Points
-
-Set the entry and finish points of the workflow:
-
-```python
-workflow.set_entry_point("agent")
-workflow.set_finish_point("tool")
-```
-
-### Compiling and Invoking the Workflow
-
-Compile the workflow and invoke it:
-
-```python
-app = workflow.compile()
-
-app.invoke("write an article, content is startup.md ")
-
-```
-
-## Running the Script
-
-1. Ensure all dependencies are installed as described in the [Installing Dependencies](#installing-dependencies) section.
-2. If required, ensure the `ollama` server is running as described in the [Setting Up the Ollama Server](#setting-up-the-ollama-server) section.
-3. Run the Python script:
-   ```sh
-   python main.py
-   ```
-
-## Notes
-
-- Ensure all dependencies are installed and the `ollama` server is running before executing the script (if needed).
-- Adjust the functions and workflow as needed to suit your use case.
diff --git a/04 State Graph/README.md b/04 State Graph/README.md
index 8227532..4c01a64 100644
--- a/04 State Graph/README.md
+++ b/04 State Graph/README.md
@@ -6,31 +6,10 @@ This tutorial references:
 * [Learn LangGraph - The Easy Way](https://www.youtube.com/watch?v=R8KB-Zcynxc).
 * [langgraph_code_assistant.ipynb](https://github.com/langchain-ai/langgraph/blob/main/examples/code_assistant/langgraph_code_assistant.ipynb)
 
-## Environment Setup
+## Core Explanation
 
-### Prerequisites
-
-- Python 3.11
-- `langgraph` library
-
-### Installing Dependencies
-
-Install the necessary Python package:
-```sh
-pip install langgraph
-```
-
-## Script Explanation
-
-### Importing Libraries
-
-The script starts by importing the necessary libraries:
-
-```python
-from langgraph.graph import StateGraph, END
-from typing import TypedDict, Literal, Union
-import random
-```
+* We need a `State` that acts as a global state machine and stores information such as the chat history
+* We need conditional edges so the state machine can switch between states (see the sketch below)
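+
+A minimal sketch of those two ideas together (illustrative names, not necessarily the exact ones used in `main.py`):
+
+```python
+from typing import Literal, TypedDict
+
+from langgraph.graph import StateGraph, END
+
+class State(TypedDict):
+    history: str  # global info that every node can read and extend
+
+def step(state: State) -> State:
+    state["history"] += " step"
+    return state
+
+def router(state: State) -> Literal["loop", "done"]:
+    # Conditional edge: inspect the state and pick the next node
+    return "loop" if len(state["history"]) < 30 else "done"
+
+workflow = StateGraph(State)
+workflow.add_node("step", step)
+workflow.set_entry_point("step")
+workflow.add_conditional_edges("step", router, {"loop": "step", "done": END})
+
+app = workflow.compile()
+print(app.invoke({"history": "start"}))
+```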
 
 ### Defining Node and Edge function
diff --git a/05 Agents with Tool/README.md b/05 Agents with Tool/README.md
new file mode 100644
index 0000000..45a11e5
--- /dev/null
+++ b/05 Agents with Tool/README.md
@@ -0,0 +1,88 @@
+# Agents with Tool
+
+This tutorial demonstrates how to create a simple TRPG agent using LangChain and LangGraph. The agent simulates a DM and a Player to run a fast-play DnD game. You can extend this code with more complex interactions, additional game mechanics, or integrations with other tools and APIs for a richer gaming experience.
+
+## Core Explanation
+
+* We use `AgentBase` subclasses as the agents for the different roles
+* We use a conditional edge to decide when to call the dice-rolling tool
+
+#### Base Class for Agent
+
+Create an abstract base class for the agents.
+
+```python
+class AgentBase(ABC):
+    def __init__(self, state: TRPGState):
+        self.state = state
+
+    @abstractmethod
+    def get_prompt_template(self) -> str:
+        pass
+
+    def execute(self) -> TRPGState:
+        self.state["history"] = clip_history(self.state["history"])
+        template = self.get_prompt_template()
+        prompt = PromptTemplate.from_template(template)
+        llm_chain = prompt | llm | StrOutputParser()
+        generation = llm_chain.invoke({"history": self.state["history"], "roll_number": self.state["roll_number"]})
+        data = json.loads(generation)
+        self.state["need_roll"] = data.get("need_roll", False)
+        self.state["roll_number"] = -1
+        self.state["history"] += "\n" + generation
+        self.state["history"] = clip_history(self.state["history"])
+        return self.state
+```
+
+#### Specific Agent Classes
+
+Define the concrete agent classes for the DM and the Player.
+
+```python
+class DM(AgentBase):
+    def get_prompt_template(self) -> str:
+        return """
+        {history}
+        As DnD DM, describe the current scenario for the player. (in short, we do fast play)
+        Sometimes ask for a dice roll, sometimes not.
+        The player rolled {roll_number}; if it is > 0, explain how that roll affects the outcome.
+        Output the JSON in the format: {{"scenario": "your action description", "need_roll": true/false}}
+        """
+
+class Player(AgentBase):
+    def get_prompt_template(self) -> str:
+        return """
+        Here is the scenario: {history}
+        As a Player, I want to perform an action. (in short, we do fast play)
+        Output the JSON in the format: {{"action": "I want xxxx"}}
+        """
+```
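+
+The snippets in this README assume a shared `TRPGState`, a `clip_history` helper, and a JSON-mode `llm`, none of which are defined above. A minimal sketch of those pieces (the field names follow the snippets; the history cap and the `phi3` model choice are assumptions, not a verbatim copy of `trpg_agent.py`):
+
+```python
+import json
+import random
+from abc import ABC, abstractmethod
+from typing import Literal, TypedDict
+
+from langchain_community.chat_models import ChatOllama
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import PromptTemplate
+
+class TRPGState(TypedDict):
+    history: str      # running chat log shared by all nodes
+    need_roll: bool   # set by the DM when a dice roll is required
+    roll_number: int  # last roll result, or -1 when there is none
+
+def clip_history(history: str, max_chars: int = 8000) -> str:
+    # Keep only the tail of the log so the prompt stays small (cap is an assumption)
+    return history[-max_chars:]
+
+# JSON mode, so json.loads can parse every generation (model choice is an assumption)
+llm = ChatOllama(model="phi3", format="json", temperature=0)
+```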
+#### Roll Dice Function
+
+A function to simulate rolling a 20-sided die.
+
+```python
+def RollDice(state: TRPGState) -> TRPGState:
+    random_number = random.randint(1, 20)
+    state["history"] += "\n" + "roll result:" + str(random_number)
+    state["history"] = clip_history(state["history"])
+    state["need_roll"] = False
+    state["roll_number"] = random_number
+    return state
+```
+
+#### Conditional Edge for the Tool
+
+A function that checks whether a roll is needed and routes the graph accordingly.
+
+```python
+def check_need_roll(state: TRPGState) -> Literal["roll", "not roll"]:
+    # Route to the dice tool only when the DM asked for a roll
+    if state.get("need_roll"):
+        return "roll"
+    else:
+        return "not roll"
+```
diff --git a/05 Agents with Tools/main.py b/05 Agents with Tool/trpg_agent.py
similarity index 100%
rename from 05 Agents with Tools/main.py
rename to 05 Agents with Tool/trpg_agent.py
diff --git a/05 Agents with Tools/README.md b/05 Agents with Tools/README.md
deleted file mode 100644
index 165bd33..0000000
--- a/05 Agents with Tools/README.md
+++ /dev/null
@@ -1 +0,0 @@
-pip install langchain
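
Taken together, the agents and the tool from the 05 README still need to be wired into a graph. The sketch below shows one plausible wiring for a single DM-player exchange, assumed from the snippets above rather than copied from `trpg_agent.py`:

```python
from langgraph.graph import StateGraph, END

workflow = StateGraph(TRPGState)
workflow.add_node("dm", lambda state: DM(state).execute())
workflow.add_node("player", lambda state: Player(state).execute())
workflow.add_node("roll", RollDice)

workflow.set_entry_point("dm")
# Conditional edge: roll the dice only when the DM asked for it
workflow.add_conditional_edges("dm", check_need_roll, {"roll": "roll", "not roll": "player"})
workflow.add_edge("roll", "dm")
workflow.add_edge("player", END)

app = workflow.compile()
final_state = app.invoke({"history": "", "need_roll": False, "roll_number": -1})
print(final_state["history"])
```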