Spaces: Runtime error
Update app.py

app.py CHANGED
@@ -1,54 +1,57 @@
- import
  import torch
- from transformers import AutoProcessor, Gemma3ForConditionalGeneration, TextIteratorStreamer
- import os
- from huggingface_hub import login
- import os

-
-
-
-
-
-
-
-     model_id, device_map="auto", torch_dtype=torch.bfloat16, attn_implementation="eager", cache_dir="F:\\huggingface_cache"
  )

-
-
- '''
- conversation = [
-     {
-         "role": "user",
-         "content": [
-             {"type": "image", "image": "https://www.ilankelman.org/stopsigns/australia.jpg"},
-             {"type": "text", "text": "Please describe this image in detail."},
-         ],
-     },
- ]
- '''
- messages_list.append({"role": "user", "content": [{"type": "text", "text": message}]})

-
-
-
-
-
-
-
-
-
-
          inputs,
-
-
      )
-
-
-
-
- return

- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
  demo.launch()
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
  import torch

+ # Load the model and tokenizer
+ model_name = "rahul7star/gemma-3bit"  # or "google/gemma-3-1b-it"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.float16,
+     device_map="auto"
  )

+ # System prompt
+ system_prompt = """Enhance and expand the following prompt with more details and context."""

+ def predict(message, history):
+     # Format the chat history
+     messages = [{"role": "system", "content": system_prompt}]
+
+     # Add the previous messages
+     for user_msg, bot_msg in history:
+         messages.append({"role": "user", "content": user_msg})
+         messages.append({"role": "assistant", "content": bot_msg})
+
+     # Add the new message
+     messages.append({"role": "user", "content": message})
+
+     # Tokenize with the chat template
+     inputs = tokenizer.apply_chat_template(
+         messages,
+         return_tensors="pt"
+     ).to(model.device)
+
+     # Generate a response
+     outputs = model.generate(
          inputs,
+         max_new_tokens=512,
+         temperature=0.7,
+         top_p=0.95,
+         do_sample=True
      )
+
+     # Decode only the newly generated tokens
+     response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
+
+     return response
+
+ # Gradio interface
+ demo = gr.ChatInterface(
+     fn=predict,
+     title="Health Advisor Chatbot (Gemma 3)",
+     description="This chatbot is designed to provide information on health, nutrition, and exercise. It does not make medical diagnoses or recommend prescriptions.",
+     theme="soft"
+ )

  demo.launch()
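A minimal sketch of how the new predict() handler could be exercised outside the Gradio UI, assuming the model and tokenizer above load successfully; the sample question and the empty history list are illustrative placeholders, not part of the committed app.py.

# Quick local sanity check of predict() without launching the Gradio app.
# The question is an arbitrary example; history is empty on the first turn,
# matching what gr.ChatInterface passes for a new conversation.
if __name__ == "__main__":
    sample_history = []
    reply = predict("What are some simple ways to stay hydrated during exercise?", sample_history)
    print(reply)

On later turns, Gradio's default tuple-style history arrives as a list of (user, assistant) pairs, which is the shape the for user_msg, bot_msg in history loop expects.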