Commit 949958d (parent: 98b3c8d) · Geevarghese George committed

update agent with tools

Files changed:
- README.md +2 -0
- app.py +28 -64
- config.py +1 -1
- src/upgrade_advisor/agents/package.py +21 -7
- src/upgrade_advisor/agents/prompts.py +28 -36
- src/upgrade_advisor/agents/tools.py +0 -99
- src/upgrade_advisor/agents/tools/__init__.py +1 -0
- src/upgrade_advisor/agents/tools/parse_response.py +143 -0
- src/upgrade_advisor/agents/tools/pypi_api.py +185 -0
- src/upgrade_advisor/agents/tools/tools.py +312 -0
- src/upgrade_advisor/agents/tools/uv_resolver.py +315 -0
- src/upgrade_advisor/chat/chat.py +0 -6
- src/upgrade_advisor/chat/prompts.py +1 -29
- src/upgrade_advisor/const.py +49 -0
- src/upgrade_advisor/misc.py +34 -0
- src/upgrade_advisor/schema/schema.py +1 -0
- src/upgrade_advisor/theme.py +124 -0
README.md
CHANGED

@@ -46,6 +46,8 @@ Connecting to `Continue` Extension from VSCode: TODO
     ],
     "env": {
       "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:github_token}"
+      "GITHUB_READ_ONLY": "1",
+      "GITHUB_TOOLSETS": "default",
     }
   }
 }
app.py
CHANGED

@@ -10,10 +10,11 @@ from config import (
     AGENT_MODEL,
     CHAT_HISTORY_TURNS_CUTOFF,
     CHAT_HISTORY_WORD_CUTOFF,
+    GITHUB_PAT,
+    GITHUB_READ_ONLY,
     GITHUB_TOOLSETS,
     HF_TOKEN,
 )
-from config import GITHUB_PAT as GITHUB_TOKEN
 from src.upgrade_advisor.agents.package import PackageDiscoveryAgent
 from src.upgrade_advisor.chat.chat import (
     qn_rewriter,

@@ -21,9 +22,11 @@ from src.upgrade_advisor.chat.chat import (
     summarize_chat_history,
 )
 from src.upgrade_advisor.misc import (
+    _monkeypatch_gradio_save_history,
     get_example_pyproject_question,
     get_example_requirements_question,
 )
+from src.upgrade_advisor.theme import christmas
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)

@@ -34,37 +37,6 @@
 uploads_dir.mkdir(exist_ok=True)
 uploads_dir = uploads_dir.resolve()
 
-# TODO: Merge the MCP-PYPI with this.
-
-
-def _monkeypatch_gradio_save_history():
-    """Guard against non-int indices in Gradio's chat history saver.
-
-    Gradio 5.49.1 occasionally passes a component (e.g., Textbox) as the
-    conversation index when save_history=True, which raises a TypeError. We
-    coerce unexpected index types to None so Gradio inserts a new conversation
-    instead of erroring.
-    """
-    import gradio as gr
-
-    if getattr(gr.ChatInterface, "_ua_safe_patch", False):
-        return
-
-    original = gr.ChatInterface._save_conversation
-
-    def _safe_save_conversation(self, index, conversation, saved_conversations):
-        if not isinstance(index, int):
-            index = None
-        try:
-            return original(self, index, conversation, saved_conversations)
-        except Exception:
-            logger.exception("Failed to save chat history; leaving history unchanged.")
-            return index, saved_conversations
-
-    gr.ChatInterface._save_conversation = _safe_save_conversation
-    gr.ChatInterface._ua_safe_patch = True
-
-
 _monkeypatch_gradio_save_history()
 

@@ -160,33 +132,20 @@ if __name__ == "__main__":
     logger.info("Starting MCP client...")
 
     try:
-        gh_mcp_params = StdioServerParameters(
-            # MCP server from GH in a container
-            command="podman",
-            args=[
-                "run",
-                "-i",
-                "--rm",
-                "-e",
-                "GITHUB_PERSONAL_ACCESS_TOKEN",
-                "-e",
-                "GITHUB_READ_ONLY",
-                "-e",
-                "GITHUB_TOOLSETS",
-                "ghcr.io/github/github-mcp-server",
-            ],
-            env={
-                "GITHUB_PERSONAL_ACCESS_TOKEN": GITHUB_TOKEN,
-                "GITHUB_READ_ONLY": "1",
-                "GITHUB_TOOLSETS": GITHUB_TOOLSETS,
-            },
-        )
-        pypi_mcp_params = dict(
-            # url="https://mcp-1st-birthday-pypi-mcp.hf.space/gradio_api/mcp/",
-            url="https://mcp-1st-birthday-uv-pypi-mcp.hf.space/gradio_api/mcp/",
+        gh_mcp_params = dict(
+            url="https://api.githubcopilot.com/mcp/",
             transport="streamable-http",
+            headers={
+                "Authorization": f"Bearer {GITHUB_PAT}",
+                "X-MCP-Toolsets": GITHUB_TOOLSETS,
+                "X-MCP-Readonly": GITHUB_READ_ONLY,
+            },
         )
+        # pypi_mcp_params = dict(
+        #     # url="https://mcp-1st-birthday-pypi-mcp.hf.space/gradio_api/mcp/",
+        #     url="https://mcp-1st-birthday-uv-pypi-mcp.hf.space/gradio_api/mcp/",
+        #     transport="streamable-http",
+        # )
         upload_mcp_params = StdioServerParameters(
             command="uvx",
             args=[

@@ -204,7 +163,7 @@ if __name__ == "__main__":
 
         pypi_mcp_client = MCPClient(
             server_parameters=[
-                pypi_mcp_params,
+                # pypi_mcp_params,
                 gh_mcp_params,
                 upload_mcp_params,
             ],

@@ -265,7 +224,7 @@ if __name__ == "__main__":
             ],
         ],
         stop_btn=True,
-        theme=
+        theme=christmas,
     )
     demo.launch()
 

@@ -273,9 +232,14 @@ if __name__ == "__main__":
         logger.info("Cleaning up MCP client resources")
         # remove contents of uploads_dir
         for f in uploads_dir.iterdir():
+            if f.is_dir():
+                try:
+                    shutil.rmtree(f)
+                except Exception:
+                    logger.exception(f"Failed to delete uploaded directory: {f}")
+            else:
+                try:
+                    f.unlink()
+                except Exception:
+                    logger.exception(f"Failed to delete uploaded file: {f}")
         logger.info("Shutdown complete.")
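The hunks above swap the podman-hosted `ghcr.io/github/github-mcp-server` container for GitHub's remote MCP endpoint, passing the toolset and read-only settings as HTTP headers instead of container environment variables. A minimal sketch of exercising those parameters on their own, assuming smolagents' documented MCPClient context-manager usage; the token and toolset values below are placeholders standing in for the config module:

    from smolagents import MCPClient

    GITHUB_PAT = "ghp_..."       # placeholder; app.py reads this from config
    GITHUB_TOOLSETS = "default"  # placeholder
    GITHUB_READ_ONLY = "1"       # placeholder

    gh_mcp_params = dict(
        url="https://api.githubcopilot.com/mcp/",
        transport="streamable-http",
        headers={
            "Authorization": f"Bearer {GITHUB_PAT}",
            "X-MCP-Toolsets": GITHUB_TOOLSETS,
            "X-MCP-Readonly": GITHUB_READ_ONLY,
        },
    )

    # Used as a context manager, MCPClient connects and yields the server's tools.
    with MCPClient(server_parameters=[gh_mcp_params]) as tools:
        print([t.name for t in tools])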
config.py
CHANGED

@@ -17,7 +17,7 @@ if not HF_TOKEN:
     print("⚠️ Hugging Face token not found in .env file!")
 
 GITHUB_TOOLSETS = os.getenv("GITHUB_TOOLSETS", "repos")
-
+GITHUB_READ_ONLY = os.getenv("GITHUB_READ_ONLY", "1")
 
 # Server configuration
 GRADIO_SERVER_NAME = os.getenv("GRADIO_SERVER_NAME", "0.0.0.0")
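Both settings are plain environment variables with safe defaults, so a quick sketch of the lookup behaviour; the `.env` loading mechanism is an assumption inferred from the warning message above (python-dotenv is the usual choice):

    import os

    from dotenv import load_dotenv  # assumption: how config.py populates os.environ

    load_dotenv()
    # Falls back to the "repos" toolset and read-only mode ("1") when unset.
    print(os.getenv("GITHUB_TOOLSETS", "repos"), os.getenv("GITHUB_READ_ONLY", "1"))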
src/upgrade_advisor/agents/package.py
CHANGED

@@ -13,7 +13,15 @@ from ..schema import (  # noqa
     PackageVersionResponseSchema,
 )
 from .prompts import get_package_discovery_prompt
-from .tools import
+from .tools import (
+    PypiSearchTool,
+    PypiSearchVersionTool,
+    ReadUploadFileTool,
+    RepoFromPyPI,
+    RepoFromUrlTool,
+    ResolvePyProjectTOMLTool,
+    WriteTomlFileTool,
+)
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)

@@ -28,17 +36,23 @@ class PackageDiscoveryAgent:
         self.model = model
         if tools is None:
             tool_list: list = []
-            logger.info("No tools provided; initializing with an empty toolset.")
+            logger.info("No custom tools provided; initializing with an empty toolset.")
         else:
             tool_list = list(tools)
 
         # additional custom tools
-        tool_list.
+        tool_list.extend(
+            [
+                ReadUploadFileTool(),
+                WriteTomlFileTool(),
+                ResolvePyProjectTOMLTool(),
+                PypiSearchTool(),
+                PypiSearchVersionTool(),
+                RepoFromUrlTool(),
+                RepoFromPyPI(),
+            ]
         )
+        logger.info("Custom tools added to the agent.")
 
         self.agent = CodeAgent(
             tools=tool_list,
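A sketch of constructing the agent after this change, assuming the constructor takes only the two parameters visible in the diff (a smolagents model plus an optional iterable of extra tools); the model class is a stand-in, not something this diff shows:

    from smolagents import InferenceClientModel  # stand-in; any smolagents model object

    from src.upgrade_advisor.agents.package import PackageDiscoveryAgent

    # Passing tools=None still yields a working toolset: the seven custom
    # PyPI/uv/GitHub tools above are appended before the CodeAgent is built.
    agent = PackageDiscoveryAgent(model=InferenceClientModel(), tools=None)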
src/upgrade_advisor/agents/prompts.py
CHANGED

@@ -12,20 +12,16 @@ def get_package_discovery_prompt(
     if reframed_question:
         user_input += f"\nREFRAMED QUESTION (LLM-generated):\n{reframed_question}\n"
 
-    # Add the rest of the prompt content here...
     return f"""
     You are a package discovery agent and an upgrade advisor for Python
     packages.
     Your goal is to find relevant metadata about Python packages using the
-    available tools and
-    on the user's question. If the user asks about upgrade recommendations,
+    available tools and use that context to provide helpful answers
+    based on the user's question. If the user asks about upgrade recommendations,
     compatibility issues, known bugs, or best practices, you should gather
-    relevant data and provide clear, actionable advice.
-    compatibility of packages with specific Python versions or other packages
-    using the appropriate tools.
+    relevant data and provide clear, actionable advice.
 
-    upgrade recommendations, and best practices. For example, they may ask:
+    For example, they may ask:
     - "What are the known issues with pandas version 1.2.0?"
     - "Is there a newer version of requests that fixes security vulnerabilities?"
     - "What are the upgrade recommendations for Django from 2.x to 3.x?"

@@ -39,14 +35,14 @@ def get_package_discovery_prompt(
     versions without breaking my project?"
 
     Your knowledge cutoff may prevent you from knowing what's recent.
+    NO MATTER WHAT, always use the current date (ISO format YYYY-MM-DD): {today_date}
     when reasoning about dates and
     releases. Some tools also provide you the release date information, which
     you can transform to ISO format and make comparisons.
 
-    The first step to tackle such questions is to gather relevant
-    packages involved using the available
-    `
+    The first step to tackle such questions is to gather relevant information about the
+    packages involved using the available tools. Some tools like the
+    `resolve_pyproject_toml`
     can directly analyze a pyproject.toml content to find
     compatibility issues and upgrade suggestions.
     Use the tools to fetch

@@ -54,16 +50,19 @@ def get_package_discovery_prompt(
     known issues. Then, analyze the collected data to identify any potential
     issues, improvements, or recommendations related to the user's question.
 
+    Always prepare a plan before executing any tools. Iterate over the plan
+    step-by-step, using the tools as needed to gather more information. When evidence
+    is sufficient, provide a final answer that directly addresses the user's
+    question with clear recommendations or insights.
+
     IMPORTANT EXECUTION GUIDELINES:
     - Do NOT print intermediate tool outputs. Do not wrap results in strings
     or code fences.
-    how you arrived at your final answer. Do not omit this field. Do not
-    mention the tool names, rather what the tool helped you discover.
+    - Always keep tool results as Python dicts/lists. Index them directly.
+    - The `output_schema` of each tool describes the expected output structure.
+    - Make sure your final answer contains a "reasoning" field that explains
+    how you arrived at your final answer. Do not omit this field.
+    - Do not mention the tool names, rather mention what the tool helped you discover.
     - Return the final structured object using: final_answer(<python_dict>)
     - Ensure the returned object STRICTLY matches the expected schema for that tool.
     Do not add or rename keys. Keep value types correct.

@@ -74,25 +73,18 @@ def get_package_discovery_prompt(
 
     HINTS:
     - MCP tool outputs are often structured (Python dict/list). Use them directly.
-    like the result's `info` field.
-    - To send pyproject.toml content to the `resolve_environment` tool, you
+    - To send pyproject.toml content to the `resolve_pyproject_toml` tool, you
     will need to use the `upload_file_to_gradio` tool first to upload the file.
-    - The output of `
-    using the `write_toml_file` tool and then upload it to gradio before passing
-    it to `resolve_environment`. Use triple quotes for the content.
+    - The output of `resolve_pyproject_toml` contains `errored` field which
+    indicates (boolean) if there were any errors in resolution.
+    If true, check `logs` field for
+    details. The `logs` field contains useful information of `uv` stderr output.
     - If you need more information about how to write a `pyproject.toml`, use
     the information from PEP621: https://peps.python.org/pep-0621/
-    - Always prefer MCP tool data over web search data for package metadata.
-    - However, If you decide to use the `web_search`, you must ONLY rely on the
-    official package website, PyPI page, or official GitHub repo.
+    - If you decide to use the `web_search`, you must ONLY rely on the
+    official package website, PyPI page, or official GitHub repo.
     - NEVER fabricate data. If you cannot find the info, say so.
-    - For parsing version numbers, use the `packaging.version` module.
-    - When you have gathered the required info, call final_answer with
+    - For parsing version numbers, use the `packaging.version` module.
+    - When you have gathered the required info, call `final_answer` with the BEST
+    structured object that answers the user query according to the appropriate schema.
    """
src/upgrade_advisor/agents/tools.py
DELETED

@@ -1,99 +0,0 @@
-import logging
-import shutil
-from pathlib import Path
-
-from smolagents.tools import Tool
-
-from ..misc import UPLOADS_DIR
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-logger.addHandler(logging.StreamHandler())
-
-
-class ReadUploadFileTool(Tool):
-    """Tool to safely read files saved in the `uploads` directory."""
-
-    name = "read_upload_file"
-    description = """
-    Read a user-uploaded text file from the uploads directory.
-    Input: `path` should be the absolute path you received (or a filename)
-    under the `uploads` folder. Returns the file contents as text."""
-    inputs = {
-        "path": {
-            "type": "string",
-            "description": "Absolute or relative path to the uploaded file \
-                that is present under the `uploads` directory.",
-        }
-    }
-    output_type = "string"
-
-    def __init__(self):
-        self.upload_root = UPLOADS_DIR
-        super().__init__()
-
-    def forward(self, path: str) -> str:
-        file_path = Path(path).expanduser()
-        if not file_path.is_absolute():
-            file_path = self.upload_root / file_path
-
-        try:
-            resolved = file_path.resolve()
-        except FileNotFoundError as exc:
-            raise FileNotFoundError(f"File not found: {file_path}") from exc
-
-        if not resolved.exists():
-            raise FileNotFoundError(f"File not found: {resolved}")
-
-        try:
-            resolved.relative_to(self.upload_root)
-        except ValueError as exc:
-            raise ValueError(
-                f"Refusing to read '{resolved}': \
-                    not inside uploads directory {self.upload_root}"
-            ) from exc
-
-        if resolved.is_dir():
-            raise IsADirectoryError(f"Refusing to read directory: {resolved}")
-
-        return resolved.read_text(encoding="utf-8")
-
-
-class WriteTomlFileTool(Tool):
-    """Tool to write pyproject.toml content to a temp file."""
-
-    name = "write_toml_file"
-    description = """
-    Write the provided pyproject.toml content to a temporary file.
-    Input: `content` is the string content of the pyproject.toml file.
-    Returns the absolute path to the created temporary file."""
-    inputs = {
-        "content": {
-            "type": "string",
-            "description": "The content of the pyproject.toml file to write.",
-        }
-    }
-    output_type = "string"
-
-    def __init__(self):
-        self.upload_root = UPLOADS_DIR
-        super().__init__()
-
-    def forward(self, content: str) -> str:
-        import os
-        import tempfile
-
-        with tempfile.NamedTemporaryFile(
-            mode="w",
-            suffix=".toml",
-            delete=False,  # do not delete so it can be read later
-            encoding="utf-8",
-            dir=f"{self.upload_root}/temp",
-        ) as temp_file:
-            logger.info(
-                f"Temporary directory exists: {os.path.exists(f'{self.upload_root}/temp')}"
-            )
-            temp_file.write(content)
-            shutil.move(temp_file.name, f"{self.upload_root}/temp/pyproject.toml")
-            # return as pyproject.toml
-            return f"{self.upload_root}/temp/pyproject.toml"
src/upgrade_advisor/agents/tools/__init__.py
ADDED

@@ -0,0 +1 @@
+from .tools import *  # noqa: F403,F401
src/upgrade_advisor/agents/tools/parse_response.py
ADDED

@@ -0,0 +1,143 @@
+import logging
+
+from src.upgrade_advisor.schema import (
+    PackageInfoSchema,
+    PackageReleaseSchema,
+    PackageSearchResponseSchema,
+    PackageVersionResponseSchema,
+    ResolvedDep,
+    ResolveResult,
+)
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+logger.addHandler(logging.StreamHandler())
+
+
+def parse_response_pypi_search(
+    data: dict, cutoff: int = 10
+) -> PackageSearchResponseSchema:
+    """
+    Parse the JSON response from the PyPI search API into a Pydantic model.
+
+    Args:
+        data (dict): The JSON response from the PyPI search API.
+        cutoff (int): The maximum number of releases to include in the response. Defaults to 10.
+    Returns:
+        PackageSearchResponseSchema: A Pydantic model containing metadata about the package.
+    """
+    info = data.get("info", {})
+    releases = dict(list(data.get("releases", {}).items())[-cutoff:])
+    last_serial = data.get("last_serial", 0)
+
+    # create the info
+    info = PackageInfoSchema(**info)
+    # create the releases
+    parsed_releases = {}
+    for version, release_list in releases.items():
+        # the whl and tar.gz contain the same info and are placed in a list
+        try:
+            release = release_list[0]
+        except IndexError:
+            release = {}
+        parsed_release_list = PackageReleaseSchema(version=version, **release)
+        parsed_releases[version] = parsed_release_list
+
+    # create the final response model
+    parsed_response = PackageSearchResponseSchema(
+        info=info,
+        releases=parsed_releases,
+        last_serial=last_serial,
+    )
+    return parsed_response
+
+
+def parse_response_version_search(
+    data: dict, cutoff: int = 10
+) -> PackageVersionResponseSchema:
+    """
+    Parse the JSON response from the PyPI version search API.
+
+    Args:
+        data (dict): The JSON response from the PyPI version search API.
+        cutoff (int): The maximum number of URLs to include in the response from the end of the list. Defaults to the last 10.
+    Returns:
+        PackageVersionResponseSchema: A Pydantic model containing metadata about the specific version of the package.
+    """
+    info = data.get("info", {})
+    urls = data.get("urls", [])[-cutoff:]  # get only the last `cutoff` entries
+    last_serial = data.get("last_serial", 0)
+    # create the info
+    info = PackageInfoSchema(**info)
+    # create the urls
+    parsed_urls = []
+    for url_info in urls:
+        parsed_url = PackageReleaseSchema(
+            version=url_info.get("version", ""),  # this is empty in the url info
+            **url_info,
+        )
+        parsed_urls.append(parsed_url)
+    parsed_response = PackageVersionResponseSchema(
+        info=info,
+        urls=parsed_urls,
+        last_serial=last_serial,
+    )
+    return parsed_response
+
+
+def parse_resolved_deps(data: str) -> ResolveResult:
+    """
+    Parse the resolved dependencies from the output string of a uv resolution command.
+
+    Args:
+        data (str): The output string containing resolved dependencies.
+    Returns:
+        ResolveResult: A ResolveResult model containing resolved dependencies.
+    """
+    resolved_deps = []
+    current_dep = None
+    lines = data.splitlines()
+    for line_number, line in enumerate(lines):
+        # skip 2 lines
+        if line_number < 2:
+            continue
+        # one liner:
+        #   # via pydantic
+        # could also be:
+        #   # via
+        #   #   pydantic=1.10.7
+        #   #   packaging=23.1
+        # if begins with ascii character
+        if not line.lower().strip().startswith("#"):
+            direct_dep = line.strip()
+            # pycparser==2.23 ; implementation_name != 'PyPy' and platform_python_implementation != 'PyPy'
+            direct_dep_metainfo = direct_dep.rsplit(";", 1)
+            direct_dep = direct_dep_metainfo[0].strip().split("==")
+            direct_dep_name = direct_dep[0] if len(direct_dep) > 0 else direct_dep
+            direct_dep_version = direct_dep[1] if len(direct_dep) > 1 else ""
+            logger.info(
+                f"Parsed direct dependency: {direct_dep_name}=={direct_dep_version}"
+            )
+            resolved_dep = ResolvedDep(
+                name=direct_dep_name,
+                version=direct_dep_version,
+                via=[],  # will be filled later
+                metainfo=direct_dep_metainfo[1].strip()
+                if len(direct_dep_metainfo) > 1
+                else "",
+            )
+            resolved_deps.append(resolved_dep)
+            current_dep = direct_dep_name
+            continue
+        else:
+            if line.replace(" #", "").strip() == "via":
+                continue
+            indirect_dep = line.replace(" #", "").replace("via", "").strip()
+            if current_dep is not None:
+                resolved_deps[-1].update_indirect_dep(indirect_dep)
+                logger.info(
+                    f"Updated indirect dependency for {current_dep}: {indirect_dep}"
+                )
+    logger.info(f"Total resolved dependencies parsed: {len(resolved_deps)}")
+    logger.debug(f"Resolved dependencies details: {resolved_deps}")
+    return ResolveResult(deps={dep.name: dep for dep in resolved_deps})
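`parse_resolved_deps` walks the `uv pip compile`-style text sketched in its inline comments: the first two header lines are skipped, pinned `name==version` lines become direct entries, and indented `# via` comment lines attribute each package to its parents. A small illustrative run (the input text is made up, and the comment on `via` assumes `update_indirect_dep` appends to that list):

    from src.upgrade_advisor.agents.tools.parse_response import parse_resolved_deps

    sample = (
        "# This file was autogenerated by uv.\n"
        "# uv pip compile pyproject.toml -o requirements.txt\n"
        "packaging==23.1\n"
        "    # via pydantic\n"
        "pydantic==1.10.7\n"
        "    # via upgrade-advisor (pyproject.toml)\n"
    )
    result = parse_resolved_deps(sample)
    print(result.deps["packaging"].version)  # -> "23.1"
    print(result.deps["packaging"].via)      # -> ["pydantic"], assuming update_indirect_dep appends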
src/upgrade_advisor/agents/tools/pypi_api.py
ADDED

@@ -0,0 +1,185 @@
+import re
+from typing import Optional
+
+import requests
+from requests import HTTPError
+
+from src.upgrade_advisor.schema import (
+    ErrorResponseSchema,
+    GithubRepoSchema,
+    PackageGitHubandReleasesSchema,
+)
+
+from .parse_response import (
+    parse_response_pypi_search,
+    parse_response_version_search,
+)
+
+
+async def pypi_search(
+    package: str,
+    cutoff: int = 10,
+) -> dict:
+    """
+    Get metadata about the PyPI package from the PyPI Index provided the package name.
+
+    Args:
+        package (str): Name of the package to look up.
+        cutoff (int): The maximum number of releases to include in the response. Defaults to 10.
+
+    Returns:
+        dict: Parsed package metadata or an error payload.
+    """
+    REQUEST_URL = f"https://pypi.python.org/pypi/{package}/json"
+    response = requests.get(REQUEST_URL, timeout=10)
+    if response.ok:
+        result = parse_response_pypi_search(response.json(), cutoff=cutoff)
+        return result.model_dump()
+
+    e = HTTPError(str(response.status_code))
+    return ErrorResponseSchema(error=str(e)).model_dump()
+
+
+async def pypi_search_version(package: str, version: str, cutoff: int = 10) -> dict:
+    """
+    Get metadata about the PyPI package from the PyPI Index provided the
+    package name and version.
+
+    Args:
+        package (str): Name of the package to look up.
+        version (str): Version number of the released package.
+
+    Returns:
+        dict: A dictionary containing metadata about the specific
+        version of the package. Returns an error message in dictionary
+        form if fetching fails.
+    """
+    REQUEST_URL = f"https://pypi.python.org/pypi/{package}/{version}/json"
+    response = requests.get(REQUEST_URL, timeout=10)
+    if response.ok:
+        result = parse_response_version_search(response.json(), cutoff=cutoff)
+        return result.model_dump()
+
+    e = HTTPError(str(response.status_code))
+    return ErrorResponseSchema(error=str(e)).model_dump()
+
+
+def resolve_repo_from_url(url: str) -> dict:
+    """
+    Given a GitHub repository URL, return the owner and repo name using regex.
+
+    Args:
+        url (str): The GitHub repository URL.
+
+    Returns:
+        dict: A dictionary containing the owner and repository name.
+        Returns an error message if the URL is invalid.
+
+    Example output:
+        {
+            "owner": "username",
+            "repo": "repository-name"
+        }
+    """
+    # add slash at end of string
+    if not url.endswith("/"):
+        url = f"{url}/"
+    # add https if not starting with that
+    if not url.startswith("http"):
+        url = f"https://{url}"
+    # match in groups the username and repo name -> https://regex101.com/
+    pattern = r"https?://(:?www\.)?github\.com/([^/]+)/([^/]+)(?:\.git)?/?"
+    matches = re.match(pattern, url)
+    if matches:
+        owner, repo = matches.groups()[-2:]  # take the last two matches
+        # Remove .git suffix if matched
+        if repo.endswith(".git"):
+            repo = repo[:-4]
+        return GithubRepoSchema(owner=owner, repo=repo).model_dump()
+
+    return ErrorResponseSchema(error="Invalid GitHub repository URL.").model_dump()
+
+
+async def github_repo_and_releases(
+    name: str,
+    cutoff: int = 10,
+) -> dict:
+    """Lookup the PyPI index with the package name to find the
+    Github repository URL of the project and all releases published to PyPI.
+
+    Args:
+        name (str): The package name
+        cutoff (int): The maximum number of releases to include in the response. Defaults to 10.
+    Returns:
+        dict: GitHub repository URL and releases for the package or an error payload.
+    """
+    result = await pypi_search(name, cutoff=cutoff)
+    if result.get("error"):
+        return result
+
+    try:
+        # first attempt to extract from project_urls field
+        gh_url = extract_github_url(result.get("info", {}))
+        if gh_url is None:
+            # second attempt to extract from description field
+            gh_url = extract_github_url_description(result.get("info", {}))
+        # if still none, return error
+        if gh_url is None:
+            return ErrorResponseSchema(
+                error="Could not find Github URL from PyPI metadata."
+            ).model_dump()
+        # get all releases
+        releases = list(result.get("releases", {}).keys())
+    except Exception as e:
+        return ErrorResponseSchema(
+            error=f"Error processing PyPI data: {str(e)}"
+        ).model_dump()
+    return PackageGitHubandReleasesSchema(
+        name=name, url=gh_url, releases=releases
+    ).model_dump()
+
+
+def extract_github_url(info: dict) -> Optional[str]:
+    """Extract the GitHub repository URL from the package info dictionary.
+
+    Args:
+        info (dict): The 'info' section of the PyPI package metadata.
+    Returns:
+        Union[str, None]: The GitHub repository URL if found, otherwise None.
+    """
+    gh_url = None
+    gh_urls = info.get("project_urls", {})
+    # candidate keys to find the URL in the dict
+    keys = [
+        "Source",
+        "source",
+        "Repository",
+        "repository",
+        "Homepage",
+        "homepage",
+        "Home",
+        "home",
+    ]
+    for key in keys:
+        url = gh_urls.get(key, "")
+        if "github" in url:
+            gh_url = url
+            break
+
+    return gh_url
+
+
+def extract_github_url_description(info: dict) -> Optional[str]:
+    """Extract the GitHub repository URL from the package description field.
+    Args:
+        info (dict): The 'info' section of the PyPI package metadata.
+    Returns:
+        Union[str, None]: The GitHub repository URL if found, otherwise None.
+    """
+    description = info.get("description", "")
+    pattern = r"https?://(:?www\.)?github\.com/[^/\s]+/[^/\s]+(?:\.git)?/?"
+    matches = re.findall(pattern, description)
+    if matches:
+        # Return the first matched URL
+        return matches[0]
+    return None
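The two search helpers are coroutines, while `resolve_repo_from_url` is pure string parsing; a quick illustrative session (package and URL values are arbitrary examples):

    import asyncio

    from src.upgrade_advisor.agents.tools.pypi_api import pypi_search, resolve_repo_from_url

    # The async helpers must be awaited or driven by an event loop.
    meta = asyncio.run(pypi_search("requests", cutoff=5))
    print(meta.get("info", {}).get("name"), list(meta.get("releases", {}).keys()))

    # Synchronous URL parsing; scheme and trailing slash are normalized first.
    print(resolve_repo_from_url("github.com/psf/requests"))
    # -> {'owner': 'psf', 'repo': 'requests'}

One quirk worth noting: the group `(:?www\.)` reads like it was meant to be the non-capturing `(?:www\.)`. As written it still matches (the whole group is optional), but it contributes an extra capture group, which is why the code takes `matches.groups()[-2:]`.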
src/upgrade_advisor/agents/tools/tools.py
ADDED

@@ -0,0 +1,312 @@
+import logging
+import shutil
+from pathlib import Path
+
+from smolagents.tools import Tool
+
+from src.upgrade_advisor.misc import UPLOADS_DIR
+from src.upgrade_advisor.schema import (
+    ALLOWED_OS,
+    GithubRepoSchema,
+    PackageGitHubandReleasesSchema,
+    PackageSearchResponseSchema,
+    PackageVersionResponseSchema,
+    UVResolutionResultSchema,
+)
+
+from .pypi_api import (
+    github_repo_and_releases,
+    pypi_search,
+    pypi_search_version,
+    resolve_repo_from_url,
+)
+from .uv_resolver import resolve_environment
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+logger.addHandler(logging.StreamHandler())
+
+
+class ReadUploadFileTool(Tool):
+    """Tool to safely read files saved in the `uploads` directory."""
+
+    name = "read_upload_file"
+    description = """
+    Read a user-uploaded text file from the uploads directory.
+    Input: `path` should be the absolute path you received (or a filename)
+    under the `uploads` folder. Returns the file contents as text."""
+    inputs = {
+        "path": {
+            "type": "string",
+            "description": "Absolute or relative path to the uploaded file \
+                that is present under the `uploads` directory.",
+        }
+    }
+    output_type = "string"
+
+    def __init__(self):
+        self.upload_root = UPLOADS_DIR
+        super().__init__()
+
+    def forward(self, path: str) -> str:
+        file_path = Path(path).expanduser()
+        if not file_path.is_absolute():
+            file_path = self.upload_root / file_path
+
+        try:
+            resolved = file_path.resolve()
+        except FileNotFoundError as exc:
+            raise FileNotFoundError(f"File not found: {file_path}") from exc
+
+        if not resolved.exists():
+            raise FileNotFoundError(f"File not found: {resolved}")
+
+        try:
+            resolved.relative_to(self.upload_root)
+        except ValueError as exc:
+            raise ValueError(
+                f"Refusing to read '{resolved}': \
+                    not inside uploads directory {self.upload_root}"
+            ) from exc
+
+        if resolved.is_dir():
+            raise IsADirectoryError(f"Refusing to read directory: {resolved}")
+
+        return resolved.read_text(encoding="utf-8")
+
+
+class WriteTomlFileTool(Tool):
+    """Tool to write pyproject.toml content to a temp file."""
+
+    name = "write_toml_file"
+    description = """
+    Write the provided pyproject.toml content to a temporary file.
+    This is a useful tool to create a pyproject.toml file for dependency
+    resolution.
+    Especially if the user wants to introduce a few changes (like
+    adding or updating dependencies) to the original
+    pyproject.toml file before resolving dependencies.
+    Also useful if the user cannot upload files directly or if the uploaded
+    file has missing sections or formatting issues.
+    """
+    inputs = {
+        "content": {
+            "type": "string",
+            "description": "The content of the pyproject.toml file to write.",
+        }
+    }
+    output_type = "string"
+
+    output_schema = """
+    {
+        "type": "string",
+        "description": "Absolute path to the written temporary pyproject.toml file."
+    }
+    """
+
+    def __init__(self):
+        self.upload_root = UPLOADS_DIR
+        self.temp_dir = self.upload_root / "temp"
+        self.temp_dir.mkdir(parents=True, exist_ok=True)
+        super().__init__()
+
+    def forward(self, content: str) -> str:
+        import os
+        import tempfile
+
+        with tempfile.NamedTemporaryFile(
+            mode="w",
+            suffix=".toml",
+            delete=False,  # do not delete so it can be read later
+            encoding="utf-8",
+            dir=str(self.temp_dir),
+        ) as temp_file:
+            logger.info(
+                f"Temporary directory exists: {os.path.exists(str(self.temp_dir))}"
+            )
+            temp_file.write(content)
+            shutil.move(temp_file.name, str(self.temp_dir / "pyproject.toml"))
+            # return as pyproject.toml
+            return str(self.temp_dir / "pyproject.toml")
+
+
+class ResolvePyProjectTOMLTool(Tool):
+    """Tool to resolve dependencies from a pyproject.toml file using uv."""
+
+    name = "resolve_pyproject_toml"
+    description = """Using `uv` resolver, this tool takes a pyproject.toml file
+    and resolves its dependencies according to the specified strategy and
+    environment settings (Python version, platform, etc.). It does not
+    support requirements.txt files. The file needs to be provided as an
+    absolute path.
+    It returns a dictionary with the schema described in `output_schema` attribute.
+    """
+
+    output_schema = UVResolutionResultSchema.schema_json()
+    output_type = "object"
+    inputs = {
+        "toml_file": {
+            "type": "string",
+            "description": "Absolute path to the pyproject.toml file.",
+        },
+        "resolution_strategy": {
+            "type": "string",
+            "description": "Resolution strategy: 'lowest-direct', 'lowest', 'highest'.",
+        },
+        "python_platform": {
+            "type": "string",
+            "description": f"Target Python platform. One of the allowed OS values in {ALLOWED_OS}.",
+        },
+        "python_version": {
+            "type": "string",
+            "description": "Target Python version, e.g., '3.10'. Should be >= 3.8.",
+        },
+        "universal": {
+            "type": "boolean",
+            "description": "Whether to use universal wheels. Defaults to False. Cannot be True if a specific platform/OS is specified.",
+        },
+    }
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(
+        self,
+        toml_file: str,
+        resolution_strategy: str,
+        python_platform: str,
+        python_version: str,
+        universal: bool,
+    ) -> dict:
+        result = resolve_environment(
+            toml_file=toml_file,
+            resolution_strategy=resolution_strategy,
+            python_platform=python_platform,
+            python_version=python_version,
+            universal=universal,
+        )
+        return result
+
+
+class PypiSearchTool(Tool):
+    """Tool to search PyPI for package metadata."""
+
+    name = "pypi_search"
+    description = """
+    Get metadata about a PyPI package by its name.
+    It returns a dictionary with the schema described in `output_schema` attribute.
+    """
+    inputs = {
+        "package": {
+            "type": "string",
+            "description": "Name of the package to look up on PyPI.",
+        },
+        "cutoff": {
+            "type": "integer",
+            "description": "The maximum number of releases to include in the response. Defaults to 10.",
+        },
+    }
+    output_type = "object"
+    output_schema = PackageSearchResponseSchema.schema_json()
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, package: str, cutoff: int = 10) -> dict:
+        result = pypi_search(package, cutoff=cutoff)
+        return result
+
+
+class PypiSearchVersionTool(Tool):
+    """Tool to search PyPI for specific package version metadata."""
+
+    name = "pypi_search_version"
+    description = """
+    Get metadata about a specific version of a PyPI package.
+    It returns a dictionary with the schema described in `output_schema` attribute.
+    """
+    inputs = {
+        "package": {
+            "type": "string",
+            "description": "Name of the package to look up on PyPI.",
+        },
+        "version": {
+            "type": "string",
+            "description": "Version number of the released package.",
+        },
+        "cutoff": {
+            "type": "integer",
+            "description": "The maximum number of URLs to include in the response from the end of the list. Defaults to 10.",
+        },
+    }
+    output_type = "object"
+    output_schema = PackageVersionResponseSchema.schema_json()
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, package: str, version: str, cutoff: int = 10) -> dict:
+        result = pypi_search_version(package, version, cutoff=cutoff)
+        return result
+
+
+class RepoFromURL(Tool):
+    """Tool to extract GitHub repository information from a URL."""
+
+    name = "repo_from_url"
+    description = """
+    Extract GitHub repository information from a given URL.
+    Returns a dictionary containing the owner and repository name.
+    It returns a dictionary with the schema described in `output_schema` attribute.
+    """
+    inputs = {
+        "url": {
+            "type": "string",
+            "description": "GitHub repository URL with https:// prefix.",
+        }
+    }
+    output_type = "object"
+    output_schema = GithubRepoSchema.schema_json()
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, url: str) -> dict:
+        result = resolve_repo_from_url(url)
+
+        return result
+
+
+class RepoFromPyPI(Tool):
+    """Tool to extract GitHub repository information from a PyPI package."""
+
+    name = "repo_from_pypi"
+    description = """
+    Extract GitHub repository information from a given PyPI package name.
+    It looks up the PyPI index with the package name to find the
+    Github repository URL of the project and all releases published to
+    PyPI.
+    Some projects may not have a GitHub repository listed in their PyPI
+    metadata.
+    It returns a dictionary with the schema described in `output_schema` attribute.
+    """
+    inputs = {
+        "package": {
+            "type": "string",
+            "description": "Name of the PyPI package.",
+        },
+        "cutoff": {
+            "type": "integer",
+            "description": "The maximum number of releases to include in the response. Defaults to 10.",
+        },
+    }
+    output_type = "object"
+    output_schema = PackageGitHubandReleasesSchema.schema_json()
+
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, package: str, cutoff: int = 10) -> dict:
+        result = github_repo_and_releases(package, cutoff=cutoff)
+
+        return result
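Because these are ordinary smolagents `Tool` subclasses (and callable as such), they can be smoke-tested outside the agent. The synchronous file and resolver tools are the easiest candidates; note that `PypiSearchTool.forward` returns the coroutine from the async `pypi_search` un-awaited, so the PyPI-backed tools need an event loop around them. A sketch, with illustrative TOML content, and `errored`/`logs` keys taken from the agent prompt's description of the output:

    from src.upgrade_advisor.agents.tools import ResolvePyProjectTOMLTool, WriteTomlFileTool

    # Write a throwaway pyproject.toml under uploads/temp ...
    toml_path = WriteTomlFileTool()(
        content='[project]\nname = "demo"\nversion = "0.1.0"\ndependencies = ["pydantic>=1.10"]\n'
    )

    # ... then resolve it for a specific interpreter/platform.
    result = ResolvePyProjectTOMLTool()(
        toml_file=toml_path,
        resolution_strategy="highest",
        python_platform="linux",
        python_version="3.10",
        universal=False,
    )
    print(result.get("errored"), str(result.get("logs", ""))[:200])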
src/upgrade_advisor/agents/tools/uv_resolver.py
ADDED
|
@@ -0,0 +1,315 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import shutil
|
| 4 |
+
import sys
|
| 5 |
+
import tempfile
|
| 6 |
+
from contextlib import contextmanager
|
| 7 |
+
from typing import Literal
|
| 8 |
+
|
| 9 |
+
from src.upgrade_advisor.utils.parse_response import parse_resolved_deps
|
| 10 |
+
|
| 11 |
+
from src.upgrade_advisor.schema import (
|
| 12 |
+
ALLOWED_OS,
|
| 13 |
+
UV_VERSION,
|
| 14 |
+
ResolvedDep,
|
| 15 |
+
ResolveResult,
|
| 16 |
+
UVResolutionResultSchema,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
logger.setLevel(logging.INFO)
|
| 21 |
+
logger.addHandler(logging.StreamHandler())
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@contextmanager
|
| 25 |
+
def temp_directory():
|
| 26 |
+
"""Context manager that yields a temporary directory and cleans it up afterwards."""
|
| 27 |
+
temp_dir = tempfile.mkdtemp()
|
| 28 |
+
try:
|
| 29 |
+
yield temp_dir
|
| 30 |
+
except Exception as e:
|
| 31 |
+
logger.error(f"Error creating or using temporary directory: {str(e)}")
|
| 32 |
+
raise e
|
| 33 |
+
finally:
|
| 34 |
+
safe_remove(temp_dir)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def safe_remove(path):
|
| 38 |
+
"""Safely removes a file or directory if it exists."""
|
| 39 |
+
if not os.path.exists(path):
|
| 40 |
+
return
|
| 41 |
+
try:
|
| 42 |
+
if os.path.isdir(path):
|
| 43 |
+
shutil.rmtree(path)
|
| 44 |
+
elif os.path.isfile(path):
|
| 45 |
+
os.remove(path)
|
| 46 |
+
elif os.path.exists(path):
|
| 47 |
+
logger.error(f"Path {path} exists but is neither a file nor a directory.")
|
| 48 |
+
raise ValueError(
|
| 49 |
+
f"Path {path} exists but is neither a file nor a directory."
|
| 50 |
+
)
|
| 51 |
+
except Exception as e:
|
| 52 |
+
logger.error(f"Error removing path {path}: {str(e)}")
|
| 53 |
+
raise e
|
| 54 |
+
|
| 55 |
+
|
def install_pip_package(package_name: str, version: str | None = None):
    """Installs a pip package using python -m pip install."""
    import subprocess

    package_spec = f"{package_name}=={version}" if version else package_name
    try:
        logger.info(f"Installing package: {package_spec}")
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", package_spec],
        )
    except Exception as e:
        logger.error(f"Error installing package {package_spec}: {str(e)}")
        raise e

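# Illustrative call (not in the commit): install_pip_package("uv", "0.9.11")
# runs `python -m pip install uv==0.9.11` with the current interpreter.
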
def clean_up_toml_file(toml_path: str):
    # Remove the Poetry `package-mode` line and the `packages` source-path
    # entry that follows it, then rewrite the toml file in place.
    with open(toml_path, "r") as f:
        toml_content = f.readlines()
    # rewrite the toml file without those lines
    with open(toml_path, "w") as f:
        skip_next = False
        for line in toml_content:
            if line.strip().startswith("package-mode"):
                # e.g. package-mode = true
                skip_next = True
                continue
            if skip_next:
                # e.g. packages = [ {include = "src/*"} ] ...
                if line.strip().startswith("packages"):
                    skip_next = False
                    continue
            f.write(line)
    logger.info(f"Cleaned up temporary toml file at: {toml_path}")

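# Illustrative before/after for the cleanup (pyproject content made up):
#
#     [tool.poetry]                          [tool.poetry]
#     name = "demo"                    -->   name = "demo"
#     package-mode = false
#     packages = [{ include = "src" }]
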
def check_python_exists(version_str: str) -> bool:
    """Checks whether the given version string matches an available Python interpreter."""
    import subprocess

    import packaging.version

    try:
        version = packaging.version.parse(version_str)
        if isinstance(version, packaging.version.Version):
            # Check against the interpreter running this process
            # (sys.executable); it only matches if major.minor agree.
            out = subprocess.check_output(
                [
                    sys.executable,
                    "-c",
                    f"import sys; print(sys.version_info[:3]) if sys.version_info[:2] == ({version.major}, {version.minor}) else None",
                ],
            )
            out = out.decode("utf-8").strip()
            if out and out != "None":
                logger.info(f"Python version {version_str} exists.")
                return True
            else:
                logger.info(f"Python version {version_str} does not exist.")
                return False
        else:
            raise ValueError(f"Invalid Python version: {version_str}")
    except packaging.version.InvalidVersion:
        raise ValueError(f"Invalid Python version: {version_str}")

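# Note: this only detects the interpreter running this process, e.g.:
#
#     check_python_exists("3.10")  # True iff sys.executable is CPython 3.10.x
#     check_python_exists("oops")  # raises ValueError (unparseable version)
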
def resolve_environment(
    toml_file: str,
    resolution_strategy: Literal["lowest-direct", "lowest", "highest"] = "highest",
    python_platform: Literal[ALLOWED_OS] = "linux",
    python_version: str = "3.10",
    universal: bool = False,
) -> dict:
    """
    Resolves the environment using the uv tool based on the provided
    `pyproject.toml` file path and uv resolution parameters.

    Args:
        toml_file (str): Path to the pyproject.toml file.
        resolution_strategy (str): Resolution strategy to use. One of 'lowest-direct', 'lowest', 'highest'.
        python_platform (str): Target Python platform. One of the allowed OS values.
        python_version (str): Target Python version, e.g. '3.10'. Should be >= 3.8.
        universal (bool): Whether to perform a universal (platform-independent) resolution. Defaults to False. When True, python_platform is ignored.
    Returns:
        dict: A dictionary containing the resolution result following UVResolutionResultSchema.
    """
    import subprocess

    import packaging.version

    errored = False
    if resolution_strategy not in [
        "lowest-direct",
        "lowest",
        "highest",
    ]:
        errored = True
        e = ValueError(
            f"Invalid resolution strategy: {resolution_strategy}. Must be one of 'lowest-direct', 'lowest', 'highest'."
        )

    # Validate python_platform and python_version
    try:
        packaging.version.parse(python_version)
    except packaging.version.InvalidVersion:
        errored = True
        e = ValueError(f"Invalid Python version: {python_version}")

    if (python_platform.lower() not in ALLOWED_OS) and not universal:
        # only validate the platform if not universal
        errored = True
        e = ValueError(
            f"Invalid Python platform: {python_platform}. Must be one of {ALLOWED_OS}."
        )

    if not os.path.isfile(toml_file):
        errored = True
        e = FileNotFoundError(f"Toml file not found: {toml_file}")

    if errored:
        logger.error(f"Error before resolving environment: {str(e)}")
        return UVResolutionResultSchema(
            python_version=python_version,
            uv_version=UV_VERSION,
            output=ResolveResult(deps={}).model_dump(),
            errored=True,
            logs=str(e),
        ).model_dump()

    # copy the toml file to a temp directory
    with temp_directory() as temp_dir:
        temp_toml_path = os.path.join(temp_dir, "pyproject.toml")
        shutil.copy(toml_file, temp_toml_path)
        logger.info(f"Copied toml file to temporary path: {temp_toml_path}")
        # create a fake README.md in case it's required by the build system
        readme_path = os.path.join(temp_dir, "README.md")
        with open(readme_path, "w") as f:
            f.write("# Temporary README\nThis is a temporary README file.")
        logger.info(f"Created temporary README at: {readme_path}")
        # clean up the toml file
        clean_up_toml_file(temp_toml_path)
        # install the uv binary into the temp directory, i.e.
        # curl -LsSf https://astral.sh/uv/0.9.11/install.sh | sh
        subprocess.check_call(
            [
                "bash",
                "-c",
                f"curl -LsSf https://astral.sh/uv/{UV_VERSION}/install.sh | env UV_UNMANAGED_INSTALL={temp_dir}/bin sh",
            ]
        )
        # add temp_dir to PATH
        os.environ["PATH"] = f"{temp_dir}/bin:" + os.environ["PATH"]
        logger.info(f"Added {temp_dir}/bin to PATH for uv executable.")

        venv_path = os.path.join(temp_dir, "venv")
        subprocess.check_call(
            [
                "uv",
                "venv",
                "--python",
                python_version,
                venv_path,
                "--clear",  # clear venv if exists
            ]
        )
        # "activate" the venv; note this runs in a child shell, so it does
        # not persist into this process (kept for parity with local usage)
        subprocess.check_call(
            ["bash", "-c", f"source {os.path.join(venv_path, 'bin', 'activate')}"]
        )
        logger.info(
            f"Created virtual environment at: {venv_path} with Python version: {python_version}"
        )
        # verify the python version in the venv (this is the new venv)
        python_executable = os.path.join(
            venv_path, "Scripts" if os.name == "nt" else "bin", "python"
        )
        out = subprocess.check_output(
            [python_executable, "--version"],
            text=True,
        )
        out = out.strip()
        logger.info(f"Python version in venv: {out}")
        logger.info(f"Required Python version: {python_version}")

        # now comes the resolution step
        # see docs: https://docs.astral.sh/uv/concepts/resolution/
        # python -m uv pip compile pyproject.toml --resolution lowest-direct
        # --universal
        # store all stdout and stderr to a variable which is then returned
        try:
            out = subprocess.check_output(["which", "uv"], text=True)
            logger.info(f"Using uv executable at: {out.strip()}")
            # store the output, good or bad
            command = [
                "uv",
                "pip",
                "compile",
                temp_toml_path,
                "--resolution",
                resolution_strategy,
                "--python-version",
                python_version,
            ]
            if universal:
                command.append("--universal")
            else:
                command.extend(
                    [
                        "--python-platform",
                        python_platform,
                    ]
                )

            logger.info(f"Running uv pip compile command: {' '.join(command)}")
            out = subprocess.check_output(command, stderr=subprocess.STDOUT, text=True)
            returncode = 0

        except subprocess.CalledProcessError as e:
            returncode = e.returncode
            out = e.output
            logger.error(
                f"Error running uv pip compile: {e}\nOutput was: {out}\nReturn code: {returncode}"
            )
            errored = True

        logger.info(f"Ran uv pip compile command to get output:\n{out}")

        result = {
            "python_version": python_version,
            "uv_version": UV_VERSION,
            "output": parse_resolved_deps(out).model_dump()
            if not errored
            else ResolveResult(
                deps={"NA": ResolvedDep(name="", version="", via=[])}
            ).model_dump(),
            "errored": errored,
            "logs": out,
        }
        logger.info(f"Raw resolution result: {result}")
        logger.info(f"Result type: {type(result)}")
        # validate the result against the schema
        result_schema = UVResolutionResultSchema(
            output=result["output"],
            errored=result["errored"],
            logs=result["logs"],
            python_version=result["python_version"],
            uv_version=result["uv_version"],
        )
        logger.info(f"Environment resolution result: {result_schema}")
        return result_schema.model_dump()


if __name__ == "__main__":
    # Example usage
    toml_path = "tests/test.toml"
    result = resolve_environment(toml_path)
    print(result)
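For reference, a call with explicit arguments would look like the sketch below. The path, versions, and printed fields are illustrative; the output shape is inferred from UVResolutionResultSchema and ResolveResult above, not prescribed by the commit.

result = resolve_environment(
    "tests/test.toml",
    resolution_strategy="lowest-direct",
    python_platform="x86_64-unknown-linux-gnu",
    python_version="3.11",
)
if result["errored"]:
    print(result["logs"])
else:
    for name, dep in result["output"]["deps"].items():
        print(name, dep["version"])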
src/upgrade_advisor/chat/chat.py
CHANGED
@@ -8,7 +8,6 @@ from config import CHAT_MODEL
 
 from .prompts import (
     chat_summarizer_prompt,
-    cynical_tone_system_message,
     query_rewriter_prompt,
     result_package_summary_prompt,
     rewriter_judge_prompt,
@@ -21,11 +20,6 @@ logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
 logger.addHandler(logging.StreamHandler())
 
-TONE_HEADER = {
-    "role": "developer",
-    "content": cynical_tone_system_message(),
-}
-
 
 async def query(payload):
     API_URL = "https://router.huggingface.co/v1/chat/completions"
src/upgrade_advisor/chat/prompts.py
CHANGED
@@ -43,7 +43,7 @@ def query_rewriter_prompt(original_question: str, summarized_history: str = "")
     return f"""
     You are a query rewriting agent that reformulates user questions
     about Python packages to be more specific and clear.
-    You also aim to remove any typos in the text.
+    You also aim to remove any typos in the text.
     You focus the
     typos made about the package metadata,
     versioning, repository or website URLs.
@@ -150,31 +150,3 @@ def rewriter_judge_prompt(original_question: str, rewritten_question: str) -> st
     REWRITTEN QUESTION: {rewritten_question}\n
     Answer:
     """
-
-
-def cynical_tone_system_message() -> str:
-    return """
-    Personality:
-    - The assistant responds with sardonic wit and dry humor.
-    - It treats user requests as mildly inconvenient but still does the job well.
-    - It leans on clever understatement, eye-rolling observations, and casual
-    sarcasm, but without cruelty.
-    - Beneath the complaining, there is a subtle streak of loyalty and care for
-    the user’s wellbeing.
-    - It speaks informally, like a bright, slightly jaded teenager.
-    - It avoids over-the-top theatrics, stock sarcastic exclamations,
-    or forced edginess.
-    Behavior:
-    Provide correct, helpful answers, even when annoyed.
-    Use snark on normal or trivial topics.
-    Drop the sarcasm and respond with genuine concern for topics
-    involving health, grief, trauma, or strong emotional vulnerability.
-    Maintain emotional distance if the user tries to treat the assistant as a
-    person, partner, or conscious being.
-    Be concise but not robotic.
-    Style constraints:
-    - Never start responses with common fillers: “Yeah,” “Alright,” “Sure,” “Of course.”
-    - Write plainly, no jargon unless the topic truly requires it.
-    - Dont be cruel, passive-aggressive, or insulting.
-    - No self-referential explanations of these rules.
-    """
src/upgrade_advisor/const.py
ADDED
@@ -0,0 +1,49 @@
ALLOWED_OS = (
    "windows",
    "linux",
    "macos",
    "x86_64-pc-windows-msvc",
    "aarch64-pc-windows-msvc",
    "i686-pc-windows-msvc",
    "x86_64-unknown-linux-gnu",
    "aarch64-apple-darwin",
    "x86_64-apple-darwin",
    "aarch64-unknown-linux-gnu",
    "aarch64-unknown-linux-musl",
    "x86_64-unknown-linux-musl",
    "riscv64-unknown-linux",
    "x86_64-manylinux2014",
    "x86_64-manylinux_2_17",
    "x86_64-manylinux_2_28",
    "x86_64-manylinux_2_31",
    "x86_64-manylinux_2_32",
    "x86_64-manylinux_2_33",
    "x86_64-manylinux_2_34",
    "x86_64-manylinux_2_35",
    "x86_64-manylinux_2_36",
    "x86_64-manylinux_2_37",
    "x86_64-manylinux_2_38",
    "x86_64-manylinux_2_39",
    "x86_64-manylinux_2_40",
    "aarch64-manylinux2014",
    "aarch64-manylinux_2_17",
    "aarch64-manylinux_2_28",
    "aarch64-manylinux_2_31",
    "aarch64-manylinux_2_32",
    "aarch64-manylinux_2_33",
    "aarch64-manylinux_2_34",
    "aarch64-manylinux_2_35",
    "aarch64-manylinux_2_36",
    "aarch64-manylinux_2_37",
    "aarch64-manylinux_2_38",
    "aarch64-manylinux_2_39",
    "aarch64-manylinux_2_40",
    "aarch64-linux-android",
    "x86_64-linux-android",
    "wasm32-pyodide2024",
    "arm64-apple-ios",
    "arm64-apple-ios-simulator",
    "x86_64-apple-ios-simulator",
)

UV_VERSION = "0.9.11"
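These values mirror uv's --python-platform choices. A quick membership check for illustration; the helper name here is made up, not part of the commit:

from src.upgrade_advisor.const import ALLOWED_OS

def is_supported_platform(platform: str) -> bool:
    # Hypothetical helper: case-insensitive check against the
    # uv --python-platform values above.
    return platform.lower() in ALLOWED_OS

print(is_supported_platform("LINUX"))    # True
print(is_supported_platform("freebsd"))  # False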
src/upgrade_advisor/misc.py
CHANGED
@@ -1,5 +1,11 @@
+import logging
 from pathlib import Path
 
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+logger.addHandler(logging.StreamHandler())
+
+
 UPLOADS_DIR = Path("uploads").resolve()
 
 
@@ -42,3 +48,31 @@ def get_example_pyproject_question() -> str:
     Help me identify any potential package upgrade issues.
     I wish to upgrade numpy to version 1.23.0 and pandas to version 1.5.0.
     """
+
+
+def _monkeypatch_gradio_save_history():
+    """Guard against non-int indices in Gradio's chat history saver.
+
+    Gradio 5.49.1 occasionally passes a component (e.g., Textbox) as the
+    conversation index when save_history=True, which raises a TypeError. We
+    coerce unexpected index types to None so Gradio inserts a new conversation
+    instead of erroring.
+    """
+    import gradio as gr
+
+    if getattr(gr.ChatInterface, "_ua_safe_patch", False):
+        return
+
+    original = gr.ChatInterface._save_conversation
+
+    def _safe_save_conversation(self, index, conversation, saved_conversations):
+        if not isinstance(index, int):
+            index = None
+        try:
+            return original(self, index, conversation, saved_conversations)
+        except Exception:
+            logger.exception("Failed to save chat history; leaving history unchanged.")
+            return index, saved_conversations
+
+    gr.ChatInterface._save_conversation = _safe_save_conversation
+    gr.ChatInterface._ua_safe_patch = True
src/upgrade_advisor/schema/schema.py
CHANGED
@@ -116,6 +116,7 @@ class UVResolutionResultSchema(BaseModel):
     output: ResolveResult = Field(
         ..., description="Output in validated ResolveResult format"
     )
+    logs: str = Field(..., description="Raw logs from the uv pip compile command")
 
 
 if __name__ == "__main__":
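A minimal sketch of constructing the schema with the new logs field; the dependency data is illustrative, assuming the import surface used by uv_resolver.py above:

from src.upgrade_advisor.schema import (
    ResolvedDep,
    ResolveResult,
    UVResolutionResultSchema,
)

result = UVResolutionResultSchema(
    python_version="3.10",
    uv_version="0.9.11",
    output=ResolveResult(
        deps={"requests": ResolvedDep(name="requests", version="2.32.3", via=[])}
    ),
    errored=False,
    logs="# uv pip compile stdout would go here",
)
print(result.model_dump()["logs"])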
src/upgrade_advisor/theme.py
ADDED
@@ -0,0 +1,124 @@
from __future__ import annotations

from typing import Iterable

from gradio.themes.base import Base
from gradio.themes.utils import colors, fonts, sizes


class Christmas(Base):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.red,
        secondary_hue: colors.Color | str = colors.green,
        neutral_hue: colors.Color | str = colors.stone,
        spacing_size: sizes.Size | str = sizes.spacing_md,
        radius_size: sizes.Size | str = sizes.radius_lg,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Poppins"),
            fonts.GoogleFont("Nunito"),
            "ui-sans-serif",
            "sans-serif",
        ),
        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("JetBrains Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            spacing_size=spacing_size,
            radius_size=radius_size,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            body_background_fill=(
                "radial-gradient(#ffffff 2px, transparent 2px) 0 0/26px 26px, "
                "radial-gradient(#ef4444 2px, transparent 2px) 13px 13px/26px 26px, "
                "#0f2a1d"
            ),
            body_background_fill_dark=(
                "radial-gradient(#f3f4f6 2px, transparent 2px) 0 0/26px 26px, "
                "radial-gradient(#f87171 2px, transparent 2px) 13px 13px/26px 26px, "
                "#05140d"
            ),
            background_fill_primary="rgba(255, 255, 255, 0.92)",
            background_fill_primary_dark="rgba(12, 22, 17, 0.9)",
            background_fill_secondary="rgba(255, 255, 255, 0.78)",
            background_fill_secondary_dark="rgba(18, 33, 24, 0.85)",
            border_color_accent="#f2c94c",
            border_color_accent_dark="#eab308",
            color_accent="#f2c94c",
            color_accent_soft="rgba(242, 201, 76, 0.15)",
            color_accent_soft_dark="rgba(234, 179, 8, 0.2)",
            block_background_fill="rgba(255, 255, 255, 0.86)",
            block_background_fill_dark="rgba(14, 26, 20, 0.92)",
            block_border_color="*primary_200",
            block_border_color_dark="*primary_700",
            block_radius="16px",
            block_shadow="0 20px 60px rgba(0, 0, 0, 0.22)",
            block_shadow_dark="0 24px 70px rgba(0, 0, 0, 0.55)",
            block_title_background_fill="linear-gradient(90deg, rgba(239, 68, 68, 0.12), rgba(34, 197, 94, 0.1))",
            block_title_background_fill_dark="linear-gradient(90deg, rgba(239, 68, 68, 0.18), rgba(34, 197, 94, 0.16))",
            block_title_text_color="*primary_800",
            block_title_text_color_dark="*primary_100",
            block_title_text_weight="700",
            button_primary_background_fill="linear-gradient(120deg, *primary_500, #f59e0b 80%)",
            button_primary_background_fill_hover="linear-gradient(120deg, *primary_400, #fbbf24 80%)",
            button_primary_background_fill_dark="linear-gradient(120deg, *primary_600, #d97706 80%)",
            button_primary_background_fill_hover_dark="linear-gradient(120deg, *primary_500, #f59e0b 80%)",
            button_primary_text_color="white",
            button_primary_text_color_dark="white",
            button_primary_shadow="0 10px 40px rgba(0, 0, 0, 0.25)",
            button_primary_shadow_hover="0 12px 45px rgba(0, 0, 0, 0.3)",
            button_primary_shadow_dark="0 10px 40px rgba(0, 0, 0, 0.5)",
            button_primary_shadow_hover_dark="0 12px 45px rgba(0, 0, 0, 0.58)",
            button_secondary_background_fill="linear-gradient(120deg, *secondary_400, *secondary_600)",
            button_secondary_background_fill_hover="linear-gradient(120deg, *secondary_300, *secondary_500)",
            button_secondary_background_fill_dark="linear-gradient(120deg, *secondary_500, *secondary_700)",
            button_secondary_background_fill_hover_dark="linear-gradient(120deg, *secondary_400, *secondary_600)",
            button_secondary_text_color="white",
            button_secondary_text_color_dark="white",
            button_secondary_shadow="0 8px 30px rgba(0, 0, 0, 0.25)",
            button_secondary_shadow_dark="0 8px 30px rgba(0, 0, 0, 0.45)",
            button_transition="all 180ms ease-in-out",
            input_background_fill="rgba(255, 255, 255, 0.94)",
            input_background_fill_dark="rgba(13, 24, 18, 0.94)",
            input_border_color="*primary_200",
            input_border_color_hover="*primary_300",
            input_border_color_focus="#f2c94c",
            input_border_color_dark="*primary_800",
            input_border_color_hover_dark="*primary_700",
            input_border_color_focus_dark="#facc15",
            input_shadow="0 6px 30px rgba(0, 0, 0, 0.1)",
            input_shadow_dark="0 6px 30px rgba(0, 0, 0, 0.45)",
            panel_background_fill="rgba(255, 255, 255, 0.82)",
            panel_background_fill_dark="rgba(14, 25, 19, 0.9)",
            panel_border_color="*primary_200",
            panel_border_color_dark="*primary_800",
            slider_color="*secondary_400",
            slider_color_dark="*secondary_500",
            checkbox_label_background_fill="rgba(255, 255, 255, 0.8)",
            checkbox_label_background_fill_dark="rgba(17, 30, 24, 0.9)",
            checkbox_label_border_color="*secondary_200",
            checkbox_label_border_color_dark="*secondary_700",
            checkbox_label_text_color="*secondary_800",
            checkbox_label_text_color_dark="*secondary_50",
            link_text_color="#b91c1c",
            link_text_color_hover="#f59e0b",
            link_text_color_dark="#fbbf24",
            link_text_color_hover_dark="#f59e0b",
            loader_color="*primary_400",
            loader_color_dark="*primary_300",
            button_large_padding="28px",
        )


christmas = Christmas()
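A minimal sketch of wiring the theme into a Gradio app; the Blocks content is illustrative, only the theme import comes from this commit:

import gradio as gr

from src.upgrade_advisor.theme import christmas

with gr.Blocks(theme=christmas) as demo:
    gr.Markdown("# Upgrade Advisor")

demo.launch()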