Skip to content

Commit 156fd57

Browse files
committed
code cleaning
1 parent 7916ac1 commit 156fd57

File tree

159 files changed

+618
-24854
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

159 files changed

+618
-24854
lines changed

TO-DO-fix.txt

-4
This file was deleted.

agentverse/agents/__init__.py

+4-2
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,14 @@
77
from .base import BaseAgent
88
from agentverse.agents.simulation_agent.conversation import ConversationAgent
99
from agentverse.agents.simulation_agent.tool import ToolAgent
10-
from agentverse.agents.simulation_agent.prisoner_dilemma import PoliceAgent, PrisonerAgent
10+
from agentverse.agents.simulation_agent.prisoner_dilemma import (
11+
PoliceAgent,
12+
PrisonerAgent,
13+
)
1114

1215
from agentverse.agents.tasksolving_agent.role_assigner import RoleAssignerAgent
1316
from agentverse.agents.tasksolving_agent.critic import CriticAgent
1417
from agentverse.agents.tasksolving_agent.evaluator import EvaluatorAgent
1518
from agentverse.agents.tasksolving_agent.solver import SolverAgent
1619
from agentverse.agents.tasksolving_agent.manager import ManagerAgent
1720
from agentverse.agents.tasksolving_agent.executor import ExecutorAgent
18-
from agentverse.agents.tasksolving_agent.executor_fc import ExecutorAgent_fc

agentverse/agents/tasksolving_agent/critic.py

-5
Original file line numberDiff line numberDiff line change
@@ -87,11 +87,6 @@ async def astep(
8787
logger.error(e)
8888
logger.warn("Retrying...")
8989
continue
90-
# if parsed_response is None:
91-
# return AgentCriticism(True, "LLM failed.", self)
92-
# parsed_response = AgentCriticism(
93-
# parsed_response.is_agree, parsed_response.criticism, self
94-
# )
9590

9691
if parsed_response is None:
9792
logger.error(f"{self.name} failed to generate valid response.")

agentverse/agents/tasksolving_agent/executor_fc.py

-129
This file was deleted.

agentverse/agents/tasksolving_agent/manager.py

-1
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
from string import Template
99
from typing import TYPE_CHECKING, List, Tuple
1010

11-
# from agentverse.environments import PipelineEnvironment
1211
from agentverse.message import Message
1312

1413
from agentverse.agents import agent_registry

agentverse/agents/tasksolving_agent/role_assigner.py

-4
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,6 @@
1313
from agentverse.agents import agent_registry
1414
from agentverse.agents.base import BaseAgent
1515

16-
# from agentverse.environments import PipelineEnvironment
17-
1816

1917
logger = get_logger()
2018

@@ -25,8 +23,6 @@ def step(
2523
self, advice: str, task_description: str, cnt_critic_agents: int
2624
) -> RoleAssignerMessage:
2725
logger.debug("", self.name, Fore.MAGENTA)
28-
# prompt = self._fill_prompt_template(advice, task_description, cnt_critic_agents)
29-
# logger.debug(f"Prompt:\n{prompt}", "Role Assigner", Fore.CYAN)
3026
prepend_prompt, append_prompt = self.get_all_prompts(
3127
advice=advice,
3228
task_description=task_description,

agentverse/environments/simulation_env/rules/updater/basic.py

-1
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,6 @@ def add_tool_response(
5252
def add_message_to_all_agents(
5353
self, agents: List[BaseAgent], message: Message
5454
) -> bool:
55-
print(message.receiver)
5655
if "all" in message.receiver:
5756
# If receiver is all, then add the message to all agents
5857
for agent in agents:

agentverse/environments/tasksolving_env/basic.py

-19
Original file line numberDiff line numberDiff line change
@@ -20,18 +20,6 @@
2020

2121
@EnvironmentRegistry.register("task-basic")
2222
class BasicEnvironment(BaseEnvironment):
23-
"""
24-
A basic environment implementing the logic of conversation.
25-
26-
Args:
27-
agents: List of agents
28-
rule: Rule for the environment
29-
max_loop_rounds: Maximum number of loop rounds number
30-
cnt_turn: Current round number
31-
last_messages: Messages from last turn
32-
rule_params: Variables set by the rule
33-
"""
34-
3523
rule: TasksolvingRule
3624
agents: Dict[Enum, Union[BaseAgent, List[BaseAgent]]] = None
3725

@@ -60,9 +48,7 @@ def __init__(self, **kwargs):
6048
async def step(
6149
self, advice: str = "No advice yet.", previous_plan: str = "No solution yet."
6250
) -> List[Message]:
63-
# advice = "No advice yet."
6451
result = ""
65-
# previous_plan = "No solution yet."
6652
logs = []
6753

6854
logger.info(f"Loop Round {self.cnt_turn}")
@@ -80,11 +66,6 @@ async def step(
8066
plan: List[SolverMessage] = await self.rule.decision_making(
8167
self.task_description, self.agents, previous_plan, advice
8268
)
83-
# Although plan may be a list in some cases, all the cases we currently consider
84-
# only have one plan, so we just take the first element.
85-
# TODO: make it more general
86-
# plan = plan[0].content
87-
# plan = [p.content for p in plan]
8869
flatten_plan = "\n".join([p.content for p in plan])
8970
logs.append({"module": "Decision Maker", "content": flatten_plan})
9071
logger.info("", f"Decision Plan:\n{flatten_plan}", Fore.YELLOW)

agentverse/environments/tasksolving_env/rules/decision_maker/vertical_solver_first.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ class VerticalSolverFirstDecisionMaker(BaseDecisionMaker):
2020
Discuss in a vertical manner.
2121
"""
2222

23-
name: str = "vertical"
23+
name: str = "vertical-sovler-first"
2424
max_inner_turns: int = 3
2525

2626
async def astep(

agentverse/initialization.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -89,10 +89,10 @@ def prepare_task_config(task):
8989
agent_configs["tool_memory"] = load_memory(agent_configs["tool_memory"])
9090
llm = load_llm(agent_configs.get("llm", "text-davinci-003"))
9191
agent_configs["llm"] = llm
92-
#agent_configs["tools"] = load_tools(agent_configs.get("tools", []))
92+
# agent_configs["tools"] = load_tools(agent_configs.get("tools", []))
9393

9494
# Build the output parser
95-
output_parser_config = agent_configs.get("output_parser", {})
95+
output_parser_config = agent_configs.get("output_parser", {"type": "dummy"})
9696
if output_parser_config.get("type", None) == "role_assigner":
9797
output_parser_config["cnt_critic_agents"] = task_config.get(
9898
"cnt_critic_agents", 0

agentverse/llms/openai.py

+17-25
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
import json
33
import ast
44
import os
5+
from aiohttp import ClientSession
56
from typing import Dict, List, Optional, Union
67
from tenacity import retry, stop_after_attempt, wait_exponential
78

@@ -23,9 +24,9 @@
2324
logging.warning("openai package is not installed")
2425
else:
2526
openai.api_key = os.environ.get("OPENAI_API_KEY")
26-
openai.proxy = os.environ.get("http_proxy")
27-
if openai.proxy is None:
28-
openai.proxy = os.environ.get("HTTP_PROXY")
27+
# openai.proxy = os.environ.get("http_proxy")
28+
# if openai.proxy is None:
29+
# openai.proxy = os.environ.get("HTTP_PROXY")
2930
if openai.api_key is None:
3031
logging.warning(
3132
"OpenAI API key is not set. Please set the environment variable OPENAI_API_KEY"
@@ -114,9 +115,6 @@ def generate_response(
114115
append_prompt: str = "",
115116
functions: List[dict] = [],
116117
) -> LLMResult:
117-
# logger.debug(prepend_prompt)
118-
# logger.debug(history)
119-
# logger.debug(append_prompt)
120118
messages = self.construct_messages(prepend_prompt, history, append_prompt)
121119
logger.log_prompt(messages)
122120

@@ -126,9 +124,6 @@ def generate_response(
126124
response = openai.ChatCompletion.create(
127125
messages=messages,
128126
functions=functions,
129-
# function_call="auto",
130-
# function_call={"name": "run_code"},
131-
# stream=True,
132127
**self.args.dict(),
133128
)
134129
if response["choices"][0]["message"].get("function_call") is not None:
@@ -180,23 +175,18 @@ async def agenerate_response(
180175
append_prompt: str = "",
181176
functions: List[dict] = [],
182177
) -> LLMResult:
183-
# logger.debug(prepend_prompt)
184-
# logger.debug(history)
185-
# logger.debug(append_prompt)
186178
messages = self.construct_messages(prepend_prompt, history, append_prompt)
187179
logger.log_prompt(messages)
188180

189181
try:
190-
# Execute function call
191182
if functions != []:
192-
response = await openai.ChatCompletion.acreate(
193-
messages=messages,
194-
functions=functions,
195-
# function_call="auto",
196-
# function_call={"name": "run_code"},
197-
# stream=True,
198-
**self.args.dict(),
199-
)
183+
async with ClientSession(trust_env=True) as session:
184+
openai.aiosession.set(session)
185+
response = await openai.ChatCompletion.acreate(
186+
messages=messages,
187+
functions=functions,
188+
**self.args.dict(),
189+
)
200190
if response["choices"][0]["message"].get("function_call") is not None:
201191
function_name = response["choices"][0]["message"]["function_call"][
202192
"name"
@@ -256,10 +246,12 @@ async def agenerate_response(
256246
)
257247

258248
else:
259-
response = await openai.ChatCompletion.acreate(
260-
messages=messages,
261-
**self.args.dict(),
262-
)
249+
async with ClientSession(trust_env=True) as session:
250+
openai.aiosession.set(session)
251+
response = await openai.ChatCompletion.acreate(
252+
messages=messages,
253+
**self.args.dict(),
254+
)
263255
return LLMResult(
264256
content=response["choices"][0]["message"]["content"],
265257
send_tokens=response["usage"]["prompt_tokens"],

0 commit comments

Comments (0)