# grammar-correction-api / upload_lora.py
from huggingface_hub import HfApi, create_repo
import os
# --- Configuration ---
# 1. Set the path to the local folder containing your clean LoRA adapter.
# (Ensure you have removed checkpoints and optimizer files).
LOCAL_LORA_PATH = "./gemma-grammar-lora"
# 2. Define the name for your new model repository on the Hub.
# It's standard to use "YourUsername/YourModelName".
REPO_ID = "enoch10jason/gemma-grammar-lora"
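
# Note: uploading requires a Hugging Face token with write access. Either run
# `huggingface-cli login` beforehand (HfApi then picks up the cached token) or
# pass token="hf_..." explicitly when constructing HfApi() below.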
# --- Upload Script ---
def main():
    # Ensure the local path exists before attempting the upload
    if not os.path.isdir(LOCAL_LORA_PATH):
        print(f"❌ Error: Local LoRA path not found at '{LOCAL_LORA_PATH}'")
        print("Please ensure your clean 'gemma-grammar-lora' folder is inside your project directory.")
        return
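
    # Optional sanity check (a sketch, not part of the original script): warn if
    # common training leftovers such as "checkpoint-*" directories or optimizer
    # state files are still inside the folder, since only the clean adapter
    # files should be uploaded.
    leftovers = [
        name for name in os.listdir(LOCAL_LORA_PATH)
        if name.startswith("checkpoint-") or name.endswith(".pt")
    ]
    if leftovers:
        print(f"⚠️ Warning: possible training artifacts found: {leftovers}")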
    api = HfApi()

    # Create the repository on the Hugging Face Hub as a private repo;
    # exist_ok=True makes the call safe to re-run if the repo already exists.
    create_repo(repo_id=REPO_ID, repo_type="model", exist_ok=True, private=True)

    print(f"Uploading files from '{LOCAL_LORA_PATH}' to '{REPO_ID}'...")
    # Upload the entire folder; large files are handled via Git LFS automatically.
    api.upload_folder(
        folder_path=LOCAL_LORA_PATH,
        repo_id=REPO_ID,
        repo_type="model",
    )
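
    # Note (assumption): upload_folder also accepts an ignore_patterns argument
    # (e.g. ignore_patterns=["checkpoint-*", "*.pt"]) if you prefer to filter
    # training leftovers at upload time instead of deleting them locally.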
print(f"βœ… LoRA adapter uploaded successfully to: https://huggingface.co/{REPO_ID}")

if __name__ == "__main__":
    main()
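
# Usage (a sketch, assuming you are already authenticated): run this script once
# after training, e.g. `python upload_lora.py`. The uploaded adapter can later be
# loaded back with peft; the base checkpoint name below is a placeholder for
# whichever Gemma model the LoRA was trained on:
#
#   from transformers import AutoModelForCausalLM
#   from peft import PeftModel
#
#   base = AutoModelForCausalLM.from_pretrained("google/gemma-2b")  # placeholder base model
#   model = PeftModel.from_pretrained(base, "enoch10jason/gemma-grammar-lora")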