owenkaplinsky committed
Commit 1e62dc6 · 1 Parent(s): df3419a
Add history and multiple messages

Files changed:
- hello-world/app.py +45 -41
- hello-world/src/blocks/text.js +36 -2
- hello-world/src/generators/python.js +11 -2
- hello-world/src/index.css +1 -1
- hello-world/src/index.js +37 -4
- hello-world/src/toolbox.js +17 -15
- requirements.txt +3 -1
hello-world/app.py
CHANGED
@@ -3,10 +3,12 @@ from fastapi.middleware.cors import CORSMiddleware
 import gradio as gr
 import uvicorn
 import asyncio
+import threading
+import os
+from dotenv import load_dotenv
 
 app = FastAPI()
 
-# Allow Blockly (running on localhost:8080) to connect
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["http://localhost:8080", "http://127.0.0.1:8080"],
@@ -15,19 +17,21 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-
-history = []
+load_dotenv()
 
-
+history = []
 latest_blockly_code = ""
-
-# A queue for assistant replies (so Gradio waits until Blockly has responded)
 assistant_queue = asyncio.Queue()
 
 
+async def reply(message):
+    global history, assistant_queue
+    history.append({"role": "assistant", "content": message})
+    await assistant_queue.put(message)
+
+
 @app.post("/update_code")
 async def update_code(request: Request):
-    """Receives live Python code generated by Blockly."""
     global latest_blockly_code
     data = await request.json()
     latest_blockly_code = data.get("code", "")
@@ -35,59 +39,59 @@ async def update_code(request: Request):
     return {"ok": True}
 
 
-def execute_blockly_logic(user_message: str):
-    """
-    Executes the latest Blockly-generated Python code in a controlled environment.
-    Expects the Blockly code to define a function called on_user_send(user_message)
-    that returns or prints the assistant reply.
-    """
-    global latest_blockly_code
-
+def execute_blockly_logic(user_message: str, loop):
+    global latest_blockly_code, history
     if not latest_blockly_code.strip():
+        return
+
+    def safe_reply(msg):
+        asyncio.run_coroutine_threadsafe(reply(msg), loop)
+
+    env = {
+        "reply": safe_reply,
+        "history": history,
+        "print": print,
+    }
 
     try:
-        # Call the defined handler function, if it exists
-        if "on_user_send" in local_vars:
-            result = local_vars["on_user_send"](user_message)
-            if result is None:
-                result = "(no reply)"
-            return str(result)
-        else:
-            print("[EXECUTION] No on_user_send() found in Blockly code.")
-            return "(missing on_user_send)"
+        exec(latest_blockly_code, env)
+        if "on_user_send" in env:
+            env["on_user_send"](user_message)
     except Exception as e:
         print("[EXECUTION ERROR]", e)
+
+
+def run_blockly_thread(user_message):
+    loop = asyncio.get_running_loop()
+    thread = threading.Thread(target=execute_blockly_logic, args=(user_message, loop))
+    thread.start()
 
 
 def build_interface():
     with gr.Blocks() as demo:
-        chatbot = gr.Chatbot(type="messages", label="Assistant")
+        chatbot = gr.Chatbot(type="messages", label="Assistant", group_consecutive_messages=False)
         msg = gr.Textbox(placeholder="Type a message and press Enter")
 
         async def process_message(message):
-            global history
-
-            # 1. Add user message
+            global history, assistant_queue
             history.append({"role": "user", "content": message})
             print(f"[USER] {message!r}")
            yield "", history
 
-            print(f"[ASSISTANT] {assistant_text!r}")
-
-            history.append({"role": "assistant", "content": assistant_text})
-            yield "", history
+            while not assistant_queue.empty():
+                assistant_queue.get_nowait()
 
+            run_blockly_thread(message)
+
+            while True:
+                try:
+                    reply_text = await asyncio.wait_for(assistant_queue.get(), timeout=2)
+                    print(f"[ASSISTANT STREAM] {reply_text!r}")
+                    yield "", history
+                except asyncio.TimeoutError:
+                    break
 
+        msg.submit(process_message, [msg], [msg, chatbot], queue=True)
         clear_btn = gr.Button("Reset chat")
         clear_btn.click(lambda: ([], ""), None, [chatbot, msg], queue=False)
 
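The core mechanism in this version of app.py is the thread-to-event-loop handoff: execute_blockly_logic runs the generated code on a plain thread, safe_reply schedules reply() back onto the Gradio event loop with asyncio.run_coroutine_threadsafe, and process_message drains assistant_queue until no reply arrives for 2 seconds. A minimal, self-contained sketch of that pattern (the worker body and the example messages are illustrative assumptions, not code from this commit):

import asyncio
import threading

async def main():
    queue = asyncio.Queue()
    loop = asyncio.get_running_loop()

    def worker():
        # Runs off the event loop, like execute_blockly_logic;
        # run_coroutine_threadsafe schedules queue.put() back on the loop.
        for text in ("first reply", "second reply"):
            asyncio.run_coroutine_threadsafe(queue.put(text), loop)

    threading.Thread(target=worker).start()

    # Drain like process_message: stop once nothing arrives for 2 seconds.
    while True:
        try:
            print(await asyncio.wait_for(queue.get(), timeout=2))
        except asyncio.TimeoutError:
            break

asyncio.run(main())

The timeout doubles as the end-of-turn signal: the generated handler may call reply() any number of times, so the consumer cannot know in advance how many messages to expect.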
hello-world/src/blocks/text.js
CHANGED
@@ -8,7 +8,7 @@ import * as Blockly from 'blockly/core';
 
 const whenUserSends = {
   type: 'when_user_sends',
-  message0: '
+  message0: 'when user sends message do %1 %2',
   args0: [
     {
       "type": "input_dummy"
@@ -26,7 +26,7 @@
 
 const assistantReply = {
   type: 'assistant_reply',
-  message0: '
+  message0: 'reply with %1',
   args0: [
     {
       type: 'input_value',
@@ -41,7 +41,41 @@
   helpUrl: '',
 };
 
+const getAssistantResponse = {
+  type: 'get_assistant_response',
+  message0: 'call model %1 with prompt %2 with %3',
+  args0: [
+    {
+      type: 'field_dropdown',
+      name: 'MODEL',
+      options: [
+        ['gpt-3.5-turbo', 'gpt-3.5-turbo-0125'],
+        ['gpt-5-mini', 'gpt-5-mini-2025-08-07'],
+      ],
+    },
+    {
+      type: 'input_value',
+      name: 'PROMPT',
+      check: 'String',
+    },
+    {
+      type: 'field_dropdown',
+      name: 'HISTORY',
+      options: [
+        ["history", "True"],
+        ["no history", "False"]
+      ]
+    },
+  ],
+  inputsInline: true,
+  output: 'String',
+  colour: 230,
+  tooltip: 'Call the selected OpenAI model to get a response.',
+  helpUrl: '',
+};
+
 export const blocks = Blockly.common.createBlockDefinitionsFromJsonArray([
   whenUserSends,
   assistantReply,
+  getAssistantResponse,
 ]);
hello-world/src/generators/python.js
CHANGED
@@ -5,13 +5,22 @@ export const forBlock = Object.create(null);
 // Generates a Python function that runs when the user sends a message
 forBlock['when_user_sends'] = function (block, generator) {
   const body = generator.statementToCode(block, 'code') || "";
-  const code = `def on_user_send(user_message):\n${body}\n`;
+  const code = `def on_user_send(user_message):\n${body} return\n`;
   return code;
 };
 
 // Generates a Python 'return' statement for the assistant's reply
 forBlock['assistant_reply'] = function (block, generator) {
   const reply = generator.valueToCode(block, 'INPUT', Order.NONE) || "''";
-  const code = `
+  const code = `reply(${reply})\n`;
   return code;
 };
+
+forBlock['get_assistant_response'] = function (block, generator) {
+  const model = block.getFieldValue('MODEL');
+  const prompt = generator.valueToCode(block, 'PROMPT', Order.NONE) || "''";
+  const history = block.getFieldValue('HISTORY');
+
+  const code = `get_assistant_response(${prompt}, model="${model}", use_history=${history})`;
+  return [code, Order.NONE];
+};
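Read together, these generators compose into a single handler: when_user_sends wraps its body in def on_user_send(...), assistant_reply emits a reply(...) call, and get_assistant_response expands into a call of the runtime helper. For a hypothetical workspace that chains all three with the user_message variable as the prompt, the emitted Python would be roughly:

# Approximate generator output for the workspace described above
# (hypothetical block arrangement; whitespace normalized).
def on_user_send(user_message):
    reply(get_assistant_response(user_message, model="gpt-3.5-turbo-0125", use_history=True))
    return

Note that reply and get_assistant_response are not defined by the workspace code itself: reply is injected by app.py's exec environment, and get_assistant_response is prepended by index.js (see below).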
hello-world/src/index.css
CHANGED
@@ -22,7 +22,7 @@ body {
 #outputPane {
   display: flex;
   flex-direction: column;
-  width:
+  width: 600px;
   flex-shrink: 0;
   height: 100%;
   overflow: hidden;
hello-world/src/index.js
CHANGED
@@ -52,6 +52,10 @@ const ws = Blockly.inject(blocklyDiv, {
   theme: myTheme,
 });
 
+if (!ws.getVariable('user_message')) {
+  ws.createVariable('user_message');
+}
+
 ws.updateUserMessage = (message) => {
   let variable = ws.getVariable('user_message');
   if (!variable) ws.createVariable('user_message');
@@ -59,10 +63,34 @@ ws.updateUserMessage = (message) => {
   ws.variableValues['user_message'] = message;
 };
 
-const
-
+const updateCode = () => {
+  let code = pythonGenerator.workspaceToCode(ws);
   const codeEl = document.querySelector('#generatedCode code');
 
+  const response = `def get_assistant_response(prompt, model, use_history=True):
+    global history
+    from openai import OpenAI
+    import os
+
+    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+    if use_history:
+        messages = history + [{"role": "user", "content": prompt}]
+    else:
+        messages = [{"role": "user", "content": prompt}]
+
+    completion = client.chat.completions.create(model=model, messages=messages)
+    return completion.choices[0].message.content.strip()
+
+`;
+
+  const blocks = ws.getAllBlocks(false);
+  const hasResponse = blocks.some(block => block.type === 'assistant_reply');
+
+  if (hasResponse) {
+    code = response + code;
+  }
+
   if (codeEl) {
     codeEl.textContent = code;
   }
@@ -87,10 +115,15 @@ try {
   console.warn('Workspace load failed, clearing storage:', e);
   localStorage.clear();
 }
-
+updateCode();
 
 ws.addChangeListener((e) => {
   if (e.isUiEvent) return;
+
+  if (!ws.getVariable('user_message')) {
+    ws.createVariable('user_message');
+  }
+
   save(ws);
 });
@@ -98,7 +131,7 @@ ws.addChangeListener((e) => {
   if (e.isUiEvent || e.type == Blockly.Events.FINISHED_LOADING || ws.isDragging()) {
     return;
   }
-
+  updateCode();
 });
 
 window.addEventListener("message", (event) => {
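The upshot: whenever the workspace contains a reply block, the preview pane and the code POSTed to /update_code carry the get_assistant_response helper prepended to the generated handler. Assembled, the program the server execs looks roughly like this (assuming the simple one-reply workspace sketched earlier; the handler body is an assumption, not commit output):

# Sketch of the assembled code string (helper + workspace handler)
# that updateCode produces.
# Note: reply() and history are not defined here; app.py injects them
# into the exec() environment at runtime.
def get_assistant_response(prompt, model, use_history=True):
    global history
    from openai import OpenAI
    import os

    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    if use_history:
        messages = history + [{"role": "user", "content": prompt}]
    else:
        messages = [{"role": "user", "content": prompt}]

    completion = client.chat.completions.create(model=model, messages=messages)
    return completion.choices[0].message.content.strip()

def on_user_send(user_message):
    reply(get_assistant_response(user_message, model="gpt-5-mini-2025-08-07", use_history=True))
    return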
hello-world/src/toolbox.js
CHANGED
@@ -28,6 +28,23 @@ export const toolbox = {
         kind: 'block',
         type: 'assistant_reply',
       },
+      {
+        kind: 'block',
+        type: 'get_assistant_response',
+        inputs: {
+          PROMPT: {
+            shadow: {
+              type: "variables_get",
+              fields: {
+                VAR: {
+                  name: "user_message",
+                  type: "String"
+                }
+              }
+            }
+          },
+        },
+      },
     ]
   },
   {
@@ -190,10 +207,6 @@
       },
     },
   },
-  {
-    kind: 'block',
-    type: 'math_constant',
-  },
   {
     kind: 'block',
     type: 'math_number_property',
@@ -225,13 +238,6 @@
       },
     },
   },
-  {
-    kind: 'block',
-    type: 'math_on_list',
-    fields: {
-      OP: 'SUM',
-    },
-  },
   {
     kind: 'block',
     type: 'math_modulo',
@@ -306,10 +312,6 @@
       },
     },
   },
-  {
-    kind: 'block',
-    type: 'math_random_float',
-  },
   ],
 },
 {
requirements.txt
CHANGED
@@ -1 +1,3 @@
-gradio
+gradio
+dotenv
+openai
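With load_dotenv() now called in app.py and the generated helper reading OPENAI_API_KEY, the Space needs that key available at runtime, presumably from a .env file or a Space secret. A quick way to sanity-check that assumption locally:

# Minimal check that the key the generated helper expects is visible.
# Assumes a .env file next to app.py containing a line like OPENAI_API_KEY=...
import os
from dotenv import load_dotenv

load_dotenv()
print("OPENAI_API_KEY set:", bool(os.getenv("OPENAI_API_KEY")))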