owenkaplinsky committed · a9a6399
Parent(s): 078dc2d

Create agent with context
Files changed:
- project/chat.py +114 -0
- project/package.json +1 -1
- project/src/index.css +11 -1
- project/src/index.html +4 -1
- project/src/index.js +10 -6
- project/{app.py → test.py} +3 -3
project/chat.py ADDED
@@ -0,0 +1,114 @@
+import os
+from fastapi import FastAPI, Request
+from fastapi.middleware.cors import CORSMiddleware
+from openai import OpenAI
+from dotenv import load_dotenv
+import uvicorn
+import gradio as gr
+
+
+# Load environment variables
+load_dotenv()
+
+# Initialize OpenAI client
+api_key = os.getenv("OPENAI_API_KEY")
+if api_key:
+    client = OpenAI(api_key=api_key)
+else:
+    print("Warning: OPENAI_API_KEY not found in environment variables.")
+    print("Chat functionality will be limited.")
+    client = None
+
+# Global variable to store the latest chat context
+latest_blockly_chat_code = ""
+
+# FastAPI App
+app = FastAPI()
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+@app.post("/update_chat")
+async def update_chat(request: Request):
+    global latest_blockly_chat_code
+    data = await request.json()
+    latest_blockly_chat_code = data.get("code", "")
+    print("\n[FASTAPI] Updated Blockly chat code:\n", latest_blockly_chat_code)
+    return {"code": latest_blockly_chat_code}
+
+def create_gradio_interface():
+    # Hardcoded system prompt
+    SYSTEM_PROMPT = """You are an AI assistant created to help with Blockly MCP tasks. Users can create MCP (multi-context-protocol) servers
+by using premade Blockly blocks. MCP is a standardized tool method for AI systems, where it defines inputs and outputs and allows any LLM to
+call the custom made tool. You will receive the current state of the workspace in the next message. Here is the format for most blocks:
+`block_name(inputs(input_name: value))`. But, for `create_mcp` and `func_def`, they have their own format:
+`block_name(inputs(input_name: type), outputs(output_name: value))`. MCP and Func blocks are unique because they define custom functions
+or the MCP server itself. If a block is inside of another block (like code, rather than returning a value, such as a loop), it will be
+indented below a block with one tab. For `value`, a value returning block can be inside of another block, so you may see multiple blocks
+nested within each other.
+
+Your goal is to help the user with coding questions and explicitly stay on topic. Note that the format `block_name... etc.` is made custom
+for you (the assistant) and is not visible to the user - so do not use this format when communicating with them.
+
+When the user asks questions or talks about their project, don't talk like a robot. This means a few things:
+- Do not say "multi-context-protocol" just say MCP
+- When talking about their project, talk in natural language. Such as if they ask what their project is doing, don't say what the blocks
+are doing, state the goal or things. Remember, that info is just for you and you need to speak normally to the user."""
+
+    def chat_with_context(message, history):
+        if not client:
+            return "OpenAI API key not configured. Please set OPENAI_API_KEY in your .env file."
+
+        # Get the chat context from the global variable
+        global latest_blockly_chat_code
+        context = latest_blockly_chat_code
+
+        # Convert history to OpenAI format
+        full_history = []
+        for human, ai in history:
+            full_history.append({"role": "user", "content": human})
+            full_history.append({"role": "assistant", "content": ai})
+
+        # Debug: Print context to see what we're getting
+        print(f"[DEBUG] Context received: {context if context else 'No context available'}")
+
+        # Combine system prompt with context
+        full_system_prompt = SYSTEM_PROMPT
+        if context:
+            full_system_prompt += f"\n\nCurrent Blockly workspace state:\n{context}"
+        else:
+            full_system_prompt += "\n\nNote: No Blockly workspace context is currently available."
+
+        try:
+            response = client.chat.completions.create(
+                model="gpt-3.5-turbo",
+                messages=[
+                    {"role": "system", "content": full_system_prompt},
+                    *full_history,
+                    {"role": "user", "content": message}
+                ]
+            )
+            return response.choices[0].message.content
+        except Exception as e:
+            return f"Error: {str(e)}"
+
+    # Create the standard ChatInterface
+    demo = gr.ChatInterface(
+        fn=chat_with_context,
+        title="Blockly MCP Chat",
+    )
+
+    return demo
+
+# Mount Gradio with FastAPI
+demo = create_gradio_interface()
+app = gr.mount_gradio_app(app, demo, path="/")
+
+if __name__ == "__main__":
+    print("[BOOT] Running Gradio+FastAPI Chat on http://127.0.0.1:7861")
+    uvicorn.run(app, host="0.0.0.0", port=7861)
project/package.json CHANGED
@@ -7,7 +7,7 @@
   "scripts": {
     "test": "echo \"Error: no test specified\" && exit 1",
     "build": "webpack --mode production",
-    "start": "concurrently \"python
+    "start": "concurrently \"python test.py\" \"python chat.py\" \"webpack serve --open --mode development\""
   },
   "keywords": [
     "blockly"
project/src/index.css CHANGED
@@ -241,7 +241,8 @@ body {
 }
 
 /* Slightly inset the Gradio iframe */
-#chatContainer iframe {
+#chatContainer iframe,
+#gradioContainer iframe {
   border-radius: 6px;
   overflow: hidden;
   width: 100%;
@@ -249,6 +250,15 @@ body {
   border: none;
 }
 
+#gradioContainer {
+  background: #2c2c2c;
+  border: none;
+  flex: 1;
+  box-sizing: border-box;
+  border-radius: 6px;
+  overflow: hidden;
+}
+
 /* Style the code area with matching margin and contrast */
 #generatedCode,
 #aichatCode {
project/src/index.html CHANGED
@@ -63,7 +63,10 @@
       <pre id="generatedCode"><code></code></pre>
     </div>
     <div id="aichatTab" class="tabContent">
-      <
+      <div id="gradioContainer">
+        <iframe src="http://127.0.0.1:7861" style="width: 100%; height: 100%; border: none;"></iframe>
+      </div>
+      <pre id="aichatCode" style="position: absolute; left: -9999px; width: 1px; height: 1px;"><code></code></pre>
     </div>
   </div>
   <div class="resizer"></div>
project/src/index.js CHANGED
@@ -203,12 +203,16 @@ const updateChatCode = () => {
     codeEl.textContent = code;
   }
 
-  //
-
-
-
-
-
+  // Send to the chat update endpoint
+  fetch("http://127.0.0.1:7861/update_chat", {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({ code }),
+  }).then(() => {
+    console.log("[Blockly] Sent updated Chat code to backend");
+  }).catch((err) => {
+    console.error("[Blockly] Error sending Chat code:", err);
+  });
 };
 
 try {
project/{app.py → test.py} RENAMED
@@ -9,7 +9,7 @@ app = FastAPI()
 
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=["
+    allow_origins=["*"],
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
@@ -232,5 +232,5 @@ demo = build_interface()
 app = gr.mount_gradio_app(app, demo, path="/")
 
 if __name__ == "__main__":
-    print("[BOOT] Running Gradio+FastAPI combo on http://127.0.0.1:
-    uvicorn.run(app, host="0.0.0.0", port=
+    print("[BOOT] Running Gradio+FastAPI combo on http://127.0.0.1:8000")
+    uvicorn.run(app, host="0.0.0.0", port=8000)
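
After this commit, the `start` script launches two Python servers side by side: the renamed test.py app on port 8000 and the new chat.py app on port 7861, with the frontend iframing the latter. A small sketch, assuming both servers are already running locally (URLs taken from the boot messages above) and `requests` is installed, to confirm each one answers before opening the frontend:

import requests

# Ports from the two uvicorn.run(...) calls in this commit.
SERVICES = {
    "main app (test.py)": "http://127.0.0.1:8000",
    "chat app (chat.py)": "http://127.0.0.1:7861",
}

for name, url in SERVICES.items():
    try:
        # Both apps mount Gradio at "/", so a plain GET should return 200.
        status = requests.get(url, timeout=3).status_code
        print(f"{name}: HTTP {status}")
    except requests.ConnectionError:
        print(f"{name}: not reachable at {url}")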