```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Define the model path
MODEL_PATH = "StevesInfinityDrive/Nova-fine-tuning"  # Adjust if using Hugging Face Hub or cloud storage


def load_model():
    print(f"Loading model from: {MODEL_PATH}")
    # use_fast=False forces the slow (Python) tokenizer, which some custom checkpoints require
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
    # trust_remote_code=True allows custom model code shipped with the checkpoint to run
    model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, trust_remote_code=True)
    # Move the model to the GPU if one is available, otherwise fall back to CPU
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    print(f"Model loaded on {device}.")
    return model, tokenizer, device


# Test the loader
if __name__ == "__main__":
    model, tokenizer, device = load_model()
    print("Model and tokenizer successfully loaded.")
```