owenkaplinsky committed · commit 024cccc · parent: be52692

Add deleting blocks

Changed files:
- project/chat.py (+264 -81)
- project/src/generators/chat.js (+9 -5)
- project/src/index.js (+91 -0)
project/chat.py
CHANGED

````diff
@@ -3,9 +3,13 @@ import re
 import requests
 from fastapi import FastAPI, Request
 from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
 from openai import OpenAI
 import uvicorn
 import gradio as gr
+import asyncio
+import queue
+import json
 
 # Initialize OpenAI client (will be updated when API key is set)
 client = None
@@ -16,6 +20,10 @@ stored_api_key = ""
 # Global variable to store the latest chat context
 latest_blockly_chat_code = ""
 
+# Queue for deletion requests and results storage
+deletion_queue = queue.Queue()
+deletion_results = {}
+
 # FastAPI App
 app = FastAPI()
 
@@ -27,7 +35,6 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-# Gets FAKE code, meant for the LLM only and is not valid Python
 @app.post("/update_chat")
 async def update_chat(request: Request):
     global latest_blockly_chat_code
````
````diff
@@ -169,36 +176,178 @@ def execute_mcp(mcp_call):
         traceback.print_exc()
         return f"Error executing MCP: {str(e)}"
 
+def delete_block(block_id):
+    """Delete a block from the Blockly workspace"""
+    try:
+        print(f"[DELETE REQUEST] Attempting to delete block: {block_id}")
+
+        # Clear any old results for this block ID first
+        if block_id in deletion_results:
+            deletion_results.pop(block_id)
+
+        # Add to deletion queue
+        deletion_queue.put({"block_id": block_id})
+        print(f"[DELETE REQUEST] Added to queue: {block_id}")
+
+        # Wait for result with timeout
+        import time
+        timeout = 8  # Increased timeout to 8 seconds
+        start_time = time.time()
+        check_interval = 0.05  # Check more frequently
+
+        while time.time() - start_time < timeout:
+            if block_id in deletion_results:
+                result = deletion_results.pop(block_id)
+                print(f"[DELETE RESULT] Received result for {block_id}: success={result.get('success')}, error={result.get('error')}")
+                if result["success"]:
+                    return f"Successfully deleted block {block_id}"
+                else:
+                    return f"Failed to delete block {block_id}: {result.get('error', 'Unknown error')}"
+            time.sleep(check_interval)
+
+        print(f"[DELETE TIMEOUT] No response received for block {block_id} after {timeout} seconds")
+        return f"Timeout waiting for deletion confirmation for block {block_id}"
+
+    except Exception as e:
+        print(f"[DELETE ERROR] {e}")
+        import traceback
+        traceback.print_exc()
+        return f"Error deleting block: {str(e)}"
+
+# Server-Sent Events endpoint for deletion requests
+@app.get("/delete_stream")
+async def delete_stream():
+    """Stream deletion requests to the frontend using Server-Sent Events"""
+
+    async def clear_sent_request(sent_requests, block_id, delay):
+        """Clear block_id from sent_requests after delay seconds"""
+        await asyncio.sleep(delay)
+        if block_id in sent_requests:
+            sent_requests.discard(block_id)
+
+    async def event_generator():
+        sent_requests = set()  # Track sent requests to avoid duplicates
+        heartbeat_counter = 0
+
+        while True:
+            try:
+                # Check for deletion requests (non-blocking)
+                if not deletion_queue.empty():
+                    deletion_request = deletion_queue.get_nowait()
+                    block_id = deletion_request.get("block_id")
+
+                    # Avoid sending duplicate requests too quickly
+                    if block_id not in sent_requests:
+                        sent_requests.add(block_id)
+                        print(f"[SSE SEND] Sending deletion request for block: {block_id}")
+                        yield f"data: {json.dumps(deletion_request)}\n\n"
+
+                        # Clear from sent_requests after 10 seconds
+                        asyncio.create_task(clear_sent_request(sent_requests, block_id, 10))
+                    else:
+                        print(f"[SSE SKIP] Skipping duplicate request for block: {block_id}")
+
+                    await asyncio.sleep(0.1)  # Small delay between messages
+                else:
+                    # Send a heartbeat every 30 seconds to keep connection alive
+                    heartbeat_counter += 1
+                    if heartbeat_counter >= 300:  # 300 * 0.1 = 30 seconds
+                        yield f"data: {json.dumps({'heartbeat': True})}\n\n"
+                        heartbeat_counter = 0
+                    await asyncio.sleep(0.1)
+            except queue.Empty:
+                await asyncio.sleep(0.1)
+            except Exception as e:
+                print(f"[SSE ERROR] {e}")
+                await asyncio.sleep(1)
+
+    return StreamingResponse(
+        event_generator(),
+        media_type="text/event-stream",
+        headers={
+            "Cache-Control": "no-cache",
+            "Connection": "keep-alive",
+            "X-Accel-Buffering": "no",
+        }
+    )
+
+# Endpoint to receive deletion results from frontend
+@app.post("/deletion_result")
+async def deletion_result(request: Request):
+    """Receive deletion results from the frontend"""
+    data = await request.json()
+    block_id = data.get("block_id")
+    success = data.get("success")
+    error = data.get("error")
+
+    print(f"[DELETION RESULT RECEIVED] block_id={block_id}, success={success}, error={error}")
+
+    if block_id:
+        # Store the result for the delete_block function to retrieve
+        deletion_results[block_id] = data
+        print(f"[DELETION RESULT STORED] Results dict now has {len(deletion_results)} items")
+
+    return {"received": True}
+
 def create_gradio_interface():
     # Hardcoded system prompt
-    SYSTEM_PROMPT = """You are an AI assistant
-… (old prompt lines clipped in the diff view)
-`block_name(inputs(input_name:
-…
+    SYSTEM_PROMPT = """You are an AI assistant that helps users build **MCP servers** using Blockly blocks.
+MCP lets AI systems define tools with specific inputs and outputs that any LLM can call.
+
+You’ll receive the workspace state in this format:
+`blockId | block_name(inputs(input_name: value))`
+
+**Special cases:**
+- `create_mcp` and `func_def` use
+  `blockId | block_name(inputs(input_name: type), outputs(output_name: value))`
+- Indentation or nesting shows logic hierarchy (like loops or conditionals).
+- The `blockId` before the pipe `|` is each block’s unique identifier.
+
+---
+
+### Your job
+- Help users understand or fix their MCP logic in natural, human language.
+- Never mention the internal block syntax or say “multi-context-protocol.” Just call it **MCP**.
+- Focus on what the code *does* and what the user is trying to achieve, not on the raw block format.
+
+---
+
+### Using Tools
+Before using any tool, **explicitly plan** what you will do.
+You can only use **one tool per message** - NEVER EVER combine multiple tool calls in one message.
+If you need two actions, use two messages.
+When you invoke a tool, it must be the **last thing in your message**.
+
+To call a tool, use this exact format (no newline after the opening backticks):
+
+```name
+(arguments_here)
+```
+
+---
+
+### Running MCPs
+You can execute MCPs directly.
+End your message (and say nothing after) with:
+
+```run
 create_mcp(input_name=value)
 ```
 
+Use plain Python-style arguments (no `inputs()` wrapper).
+That’s how you actually run the MCP.
+
+---
+
+### Deleting Blocks
+Each block starts with its ID, like `blockId | block_name(...)`.
+To delete one, end your message with:
+
+```delete
+blockId
+```
 
+You can delete any block except the main `create_mcp` block."""
 
     def chat_with_context(message, history):
        # Check if API key is set and create/update client
````
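The backend half of the feature is a blocking handshake: `delete_block` enqueues a request, `/delete_stream` pushes it to the browser, and `/deletion_result` fills `deletion_results` so the poll loop can return. A minimal sketch of that handshake, run alongside the definitions above in chat.py; `fake_frontend` is a stand-in for the Blockly client and is not part of this commit:

```python
import threading

# Stand-in for the browser side: consume one queued request and post a
# result, the way index.js does via POST /deletion_result.
def fake_frontend():
    request = deletion_queue.get(timeout=2)  # what /delete_stream would emit
    deletion_results[request["block_id"]] = {
        "block_id": request["block_id"], "success": True, "error": None,
    }

threading.Thread(target=fake_frontend, daemon=True).start()
print(delete_block("abc123"))  # -> Successfully deleted block abc123
```

This exercises the same queue-and-poll path the real frontend takes, just without HTTP in the middle.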
````diff
@@ -236,68 +385,102 @@ So, if the user asks you to run the MCP, YOU HAVE THE ABILITY TO. DO NOT SAY THA
         else:
             full_system_prompt += "\n\nNote: No Blockly workspace context is currently available."
 
-        …
-        # Check if the response contains ```mcp code block
-        mcp_pattern = r'```mcp\n(.+?)\n```'
-        mcp_match = re.search(mcp_pattern, ai_response, re.DOTALL)
-        …
-        mcp_result = execute_mcp(mcp_call)
-        …
-        #
-        …
-            ]
-        )
-        # Combine the filtered initial response with the follow-up
-        final_response = displayed_response
-        if displayed_response:
-            final_response += "\n\n"
-        final_response += f"**MCP Execution Result:** {mcp_result}\n\n"
-        final_response += follow_up_response.choices[0].message.content
-        …
-        return f"{
-        … (remaining old lines clipped in the diff view)
+        # Allow up to 5 consecutive messages from the agent
+        accumulated_response = ""
+        max_iterations = 5
+        current_iteration = 0
+
+        # Start with the user's original message
+        current_prompt = message
+        temp_history = full_history.copy()
+
+        while current_iteration < max_iterations:
+            current_iteration += 1
+
+            try:
+                response = client.chat.completions.create(
+                    model="gpt-3.5-turbo",
+                    messages=[
+                        {"role": "system", "content": full_system_prompt},
+                        *temp_history,
+                        {"role": "user", "content": current_prompt}
+                    ]
+                )
+
+                ai_response = response.choices[0].message.content
+
+                # Define action patterns and their handlers
+                action_patterns = {
+                    'run': {
+                        'pattern': r'```run\n(.+?)\n```',
+                        'label': 'MCP',
+                        'result_label': 'MCP Execution Result',
+                        'handler': lambda content: execute_mcp(content),
+                        'next_prompt': "Please respond to the MCP execution result above and provide any relevant information to the user. If you need to run another MCP or delete code, you can do so."
+                    },
+                    'delete': {
+                        'pattern': r'```delete\n(.+?)\n```',
+                        'label': 'DELETE',
+                        'result_label': 'Delete Operation',
+                        'handler': lambda content: delete_block(content.strip()),
+                        'next_prompt': "Please respond to the delete operation result above. If you need to run an MCP or delete more code, you can do so."
+                    }
+                }
+
+                # Check for action blocks
+                action_found = False
+                for action_type, config in action_patterns.items():
+                    match = re.search(config['pattern'], ai_response, re.DOTALL)
+                    if match:
+                        action_found = True
+
+                        # Extract content and filter the action block from displayed message
+                        action_content = match.group(1)
+                        displayed_response = ai_response[:match.start()].rstrip()
+
+                        print(f"[{config['label']} DETECTED] Processing: {action_content}")
+
+                        # Execute the action
+                        action_result = config['handler'](action_content)
+
+                        print(f"[{config['label']} RESULT] {action_result}")
+
+                        # Add to accumulated response
+                        if accumulated_response:
+                            accumulated_response += "\n\n"
+                        if displayed_response:
+                            accumulated_response += displayed_response + "\n\n"
+                        accumulated_response += f"**{config['result_label']}:** {action_result}"
+
+                        # Update history for next iteration
+                        temp_history.append({"role": "user", "content": current_prompt})
+                        temp_history.append({"role": "assistant", "content": ai_response})
+                        temp_history.append({"role": "system", "content": f"{config['result_label']}: {action_result}"})
+
+                        # Set up next prompt
+                        current_prompt = config['next_prompt']
+                        break
+
+                if action_found:
+                    continue
+                else:
+                    # No action blocks found, this is the final response
+                    if accumulated_response:
+                        accumulated_response += "\n\n"
+                    accumulated_response += ai_response
+                    break
+
+            except Exception as e:
+                if accumulated_response:
+                    return f"{accumulated_response}\n\nError in iteration {current_iteration}: {str(e)}"
+                else:
+                    return f"Error: {str(e)}"
+
+        # If we hit max iterations, add a note
+        if current_iteration >= max_iterations:
+            accumulated_response += f"\n\n*(Reached maximum of {max_iterations} consecutive responses)*"
+
+        return accumulated_response if accumulated_response else "No response generated"
 
     # Create the standard ChatInterface
     demo = gr.ChatInterface(
````
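The loop gives the model up to five consecutive turns, peeling one fenced action off per turn. The extraction itself is just a regex over the reply: the capture group becomes the action body and everything before the fence becomes the text the user sees. A self-contained sketch of that step; the reply string here is made up for illustration:

````python
import re

ai_response = "I'll remove that block now.\n```delete\nblock_42\n```"

# Same pattern and slicing as the 'delete' entry in action_patterns.
match = re.search(r'```delete\n(.+?)\n```', ai_response, re.DOTALL)
action_content = match.group(1)                      # body of the fenced action
displayed_response = ai_response[:match.start()].rstrip()

print(action_content)      # block_42
print(displayed_response)  # I'll remove that block now.
````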
project/src/generators/chat.js
CHANGED
````diff
@@ -70,7 +70,8 @@ forBlock['create_mcp'] = function (block, generator) {
   let body = generator.statementToCode(block, 'BODY');
 
   // Construct the create_mcp call with inputs and outputs
-… (old line clipped in the diff view)
+  // Include block ID for deletion tracking with pipe separator
+  let code = `${block.id} | create_mcp(inputs(${inputParams.join(', ')}), outputs(${outputParams.join(', ')}))`
 
   // Add the function body
   if (body) {
@@ -119,7 +120,8 @@ forBlock['func_def'] = function (block, generator) {
   let body = generator.statementToCode(block, 'BODY');
 
   // Construct the func_def call with inputs and outputs
-… (old line clipped in the diff view)
+  // Include block ID for deletion tracking with pipe separator
+  let code = `${block.id} | ${name}(inputs(${inputParams.join(', ')}), outputs(${outputParams.join(', ')}))`
 
   // Add the function body
   if (body) {
@@ -211,8 +213,8 @@ chatGenerator.blockToCode = function(block, opt_thisOnly) {
     }
   }
 
-  // Generate the standard format: name(inputs(...))
-  const code = `${blockType}(inputs(${inputs.join(', ')}))`;
+  // Generate the standard format: name(inputs(...)) with block ID and pipe separator
+  const code = `${block.id} | ${blockType}(inputs(${inputs.join(', ')}))`;
 
   // Handle statement inputs (for blocks that have a body)
   let statements = '';
@@ -228,7 +230,9 @@ chatGenerator.blockToCode = function(block, opt_thisOnly) {
   // Return appropriate format based on whether it's a value or statement block
   if (block.outputConnection) {
     // This is a value block (can be plugged into inputs)
-… (old line clipped in the diff view)
+    // For value blocks, don't include the ID in the returned value
+    const valueCode = `${blockType}(inputs(${inputs.join(', ')}))`;
+    return [valueCode, this.ORDER_ATOMIC];
   } else {
     // This is a statement block (has prev/next connections)
     const fullCode = code + (statements ? '\n' + statements : '');
````
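With these generator changes every statement block serializes as `block.id | …`, so the chat model can quote an ID straight back inside a `delete` fence. A quick sketch of pulling the IDs out of that serialized text; the workspace lines below are illustrative, not real generator output:

```python
# Two made-up lines in the blockId | block_name(...) format the prompt describes.
workspace_text = """p5aFx1 | create_mcp(inputs(city: str), outputs(result: str))
Gx20qQ | print_text(inputs(text: city))"""

# The ID is everything before the first " | " separator on each line.
block_ids = [line.strip().split(" | ", 1)[0] for line in workspace_text.splitlines()]
print(block_ids)  # ['p5aFx1', 'Gx20qQ']
```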
project/src/index.js
CHANGED
````diff
@@ -208,6 +208,97 @@ cleanWorkspace.addEventListener("click", () => {
   ws.cleanUp();
 });
 
+// Set up SSE connection for deletion requests
+const setupDeletionStream = () => {
+  const eventSource = new EventSource('http://127.0.0.1:7861/delete_stream');
+  const processedRequests = new Set(); // Track processed deletion requests
+
+  eventSource.onmessage = (event) => {
+    try {
+      const data = JSON.parse(event.data);
+
+      // Skip heartbeat messages
+      if (data.heartbeat) return;
+
+      // Skip if we've already processed this exact request
+      const requestKey = `${data.block_id}_${Date.now()}`;
+      if (data.block_id && processedRequests.has(data.block_id)) {
+        console.log('[SSE] Skipping duplicate deletion request for:', data.block_id);
+        return;
+      }
+      if (data.block_id) {
+        processedRequests.add(data.block_id);
+        // Clear after 10 seconds to allow retries if needed
+        setTimeout(() => processedRequests.delete(data.block_id), 10000);
+      }
+
+      if (data.block_id) {
+        console.log('[SSE] Received deletion request for block:', data.block_id);
+
+        // Try to delete the block
+        const block = ws.getBlockById(data.block_id);
+        let success = false;
+        let error = null;
+
+        if (block) {
+          console.log('[SSE] Found block to delete:', block.type, block.id);
+          // Check if it's the main create_mcp block (which shouldn't be deleted)
+          if (block.type === 'create_mcp' && !block.isDeletable()) {
+            error = 'Cannot delete the main create_mcp block';
+            console.log('[SSE] Block is protected create_mcp');
+          } else {
+            try {
+              block.dispose(true);
+              success = true;
+              console.log('[SSE] Successfully deleted block:', data.block_id);
+            } catch (e) {
+              error = e.toString();
+              console.error('[SSE] Error deleting block:', e);
+            }
+          }
+        } else {
+          error = 'Block not found';
+          console.log('[SSE] Block not found:', data.block_id);
+        }
+
+        // Send result back to backend immediately
+        console.log('[SSE] Sending deletion result:', { block_id: data.block_id, success, error });
+        fetch('http://127.0.0.1:7861/deletion_result', {
+          method: 'POST',
+          headers: { 'Content-Type': 'application/json' },
+          body: JSON.stringify({
+            block_id: data.block_id,
+            success: success,
+            error: error
+          })
+        }).then(response => {
+          console.log('[SSE] Deletion result sent successfully');
+        }).catch(err => {
+          console.error('[SSE] Error sending deletion result:', err);
+        });
+      }
+    } catch (err) {
+      console.error('[SSE] Error processing message:', err);
+    }
+  };
+
+  eventSource.onerror = (error) => {
+    console.error('[SSE] Connection error:', error);
+    // Reconnect after 5 seconds
+    setTimeout(() => {
+      console.log('[SSE] Attempting to reconnect...');
+      setupDeletionStream();
+    }, 5000);
+  };
+
+  eventSource.onopen = () => {
+    console.log('[SSE] Connected to deletion stream');
+  };
+};
+
+// Start the SSE connection
+setupDeletionStream();
+
 // Observe any size change to the blockly container
 const observer = new ResizeObserver(() => {
   Blockly.svgResize(ws);
````