import os
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
from huggingface_hub import login
# --- Authenticate with HF token (from Spaces Secrets) ---
login(os.environ["HF_TOKEN"])
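# Note (assumption): MedGemma is a gated model on the Hub, so the account behind
# HF_TOKEN (set as a Space secret) must have accepted the model's terms of use.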
# --- Model setup ---
MODEL_ID = "google/medgemma-4b-it"
# 4-bit quantization config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Load model + tokenizer with quantization
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
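# The model was already placed by device_map="auto" during loading, so the
# pipeline reuses that placement and no explicit `device` argument is passed.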
# --- Gradio app ---
def medgemma_chat(prompt):
    # return_full_text=False returns only the newly generated answer instead of
    # echoing the prompt back in the response box.
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, return_full_text=False)
    return outputs[0]["generated_text"]
demo = gr.Interface(
    fn=medgemma_chat,
    inputs=gr.Textbox(label="Enter medical question", lines=4, placeholder="e.g. What are symptoms of malaria?"),
    outputs=gr.Textbox(label="MedGemma Response"),
    title="🧠 MedGemma (4-bit Quantized)",
    description="Ask medical questions (research/demo use only). Running in 4-bit quantized mode for efficiency.",
)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)