| id (string, 11–50 chars) | scripts (list of strings, 0–3 items) | code_urls (list of strings, 0–3 items) | execution_urls (list of strings, 0–3 items) | estimated_vram (float64, 0–1.66k) |
|---|---|---|---|---|
deepseek-ai/DeepSeek-V3.2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_0.txt|deepseek-ai_DeepSeek-V3.2_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2_1.txt|deepseek-ai_DeepSeek-V3.2_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2_1.txt"
] | 1,659.65
|
mistralai/Mistral-Large-3-675B-Instruct-2512
|
[] |
[] |
[] | 0
|
microsoft/VibeVoice-Realtime-0.5B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-Realtime-0.5B\")\n with open('microsoft_VibeVoice-Realtime-0.5B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-Realtime-0.5B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-Realtime-0.5B_0.txt|microsoft_VibeVoice-Realtime-0.5B_0.txt>',\n )\n\n with open('microsoft_VibeVoice-Realtime-0.5B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-Realtime-0.5B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-Realtime-0.5B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-Realtime-0.5B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import VibeVoiceStreamingForConditionalGenerationInference\n model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(\"microsoft/VibeVoice-Realtime-0.5B\", dtype=\"auto\")\n with open('microsoft_VibeVoice-Realtime-0.5B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-Realtime-0.5B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-Realtime-0.5B_1.txt|microsoft_VibeVoice-Realtime-0.5B_1.txt>',\n )\n\n with open('microsoft_VibeVoice-Realtime-0.5B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import VibeVoiceStreamingForConditionalGenerationInference\nmodel = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(\"microsoft/VibeVoice-Realtime-0.5B\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-Realtime-0.5B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-Realtime-0.5B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-Realtime-0.5B_1.txt"
] | 2.46
|
black-forest-labs/FLUX.2-dev
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_0.txt|black-forest-labs_FLUX.2-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.2-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.2-dev_1.txt|black-forest-labs_FLUX.2-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.2-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.2-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.2-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.2-dev_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.2-dev_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.2-dev_1.txt"
] | 0
|
AIDC-AI/Ovis-Image-7B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"AIDC-AI/Ovis-Image-7B\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('AIDC-AI_Ovis-Image-7B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in AIDC-AI_Ovis-Image-7B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/AIDC-AI_Ovis-Image-7B_0.txt|AIDC-AI_Ovis-Image-7B_0.txt>',\n )\n\n with open('AIDC-AI_Ovis-Image-7B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"AIDC-AI/Ovis-Image-7B\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='AIDC-AI_Ovis-Image-7B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='AIDC-AI_Ovis-Image-7B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/AIDC-AI_Ovis-Image-7B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/AIDC-AI_Ovis-Image-7B_0.txt"
] | 0
|
nvidia/Nemotron-Orchestrator-8B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"nvidia/Nemotron-Orchestrator-8B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('nvidia_Nemotron-Orchestrator-8B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_Nemotron-Orchestrator-8B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_Nemotron-Orchestrator-8B_0.txt|nvidia_Nemotron-Orchestrator-8B_0.txt>',\n )\n\n with open('nvidia_Nemotron-Orchestrator-8B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"nvidia/Nemotron-Orchestrator-8B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_Nemotron-Orchestrator-8B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_Nemotron-Orchestrator-8B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"nvidia/Nemotron-Orchestrator-8B\")\n model = AutoModelForCausalLM.from_pretrained(\"nvidia/Nemotron-Orchestrator-8B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('nvidia_Nemotron-Orchestrator-8B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_Nemotron-Orchestrator-8B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_Nemotron-Orchestrator-8B_1.txt|nvidia_Nemotron-Orchestrator-8B_1.txt>',\n )\n\n with open('nvidia_Nemotron-Orchestrator-8B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"nvidia/Nemotron-Orchestrator-8B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"nvidia/Nemotron-Orchestrator-8B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_Nemotron-Orchestrator-8B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_Nemotron-Orchestrator-8B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_Nemotron-Orchestrator-8B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/nvidia_Nemotron-Orchestrator-8B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_Nemotron-Orchestrator-8B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/nvidia_Nemotron-Orchestrator-8B_1.txt"
] | 39.67
|
apple/starflow
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('apple_starflow_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in apple_starflow_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/apple_starflow_0.txt|apple_starflow_0.txt>',\n )\n\n with open('apple_starflow_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='apple_starflow_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='apple_starflow_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt|alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt>',\n )\n\n with open('alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='alibaba-pai_Z-Image-Turbo-Fun-Controlnet-Union_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
deepseek-ai/DeepSeek-V3.2-Speciale
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Speciale\")\n with open('deepseek-ai_DeepSeek-V3.2-Speciale_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Speciale_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Speciale_0.txt|deepseek-ai_DeepSeek-V3.2-Speciale_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Speciale_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Speciale\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Speciale_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Speciale_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Speciale\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2-Speciale_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Speciale_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Speciale_1.txt|deepseek-ai_DeepSeek-V3.2-Speciale_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Speciale_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Speciale\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Speciale_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Speciale_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Speciale_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Speciale_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Speciale_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Speciale_1.txt"
] | 1,659.65
|
deepseek-ai/DeepSeek-Math-V2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-Math-V2\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-Math-V2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-Math-V2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-Math-V2_0.txt|deepseek-ai_DeepSeek-Math-V2_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-Math-V2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-Math-V2\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-Math-V2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-Math-V2_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-Math-V2\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-Math-V2_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-Math-V2_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-Math-V2_1.txt|deepseek-ai_DeepSeek-Math-V2_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-Math-V2_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-Math-V2\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-Math-V2_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-Math-V2_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-Math-V2_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-Math-V2_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-Math-V2_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-Math-V2_1.txt"
] | 1,659.65
|
mistralai/Ministral-3-14B-Instruct-2512
|
[] |
[] |
[] | 33.77
|
apple/CLaRa-7B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"apple/CLaRa-7B-Instruct\", dtype=\"auto\")\n with open('apple_CLaRa-7B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in apple_CLaRa-7B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/apple_CLaRa-7B-Instruct_0.txt|apple_CLaRa-7B-Instruct_0.txt>',\n )\n\n with open('apple_CLaRa-7B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"apple/CLaRa-7B-Instruct\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='apple_CLaRa-7B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='apple_CLaRa-7B-Instruct_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/apple_CLaRa-7B-Instruct_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/apple_CLaRa-7B-Instruct_0.txt"
] | 0
|
EssentialAI/rnj-1-instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('EssentialAI_rnj-1-instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in EssentialAI_rnj-1-instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/EssentialAI_rnj-1-instruct_0.txt|EssentialAI_rnj-1-instruct_0.txt>',\n )\n\n with open('EssentialAI_rnj-1-instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='EssentialAI_rnj-1-instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='EssentialAI_rnj-1-instruct_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 40.25
|
mistralai/Ministral-3-3B-Instruct-2512
|
[] |
[] |
[] | 9.32
|
facebook/sam3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_0.txt|facebook_sam3_0.txt>',\n )\n\n with open('facebook_sam3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n with open('facebook_sam3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_1.txt|facebook_sam3_1.txt>',\n )\n\n with open('facebook_sam3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"mask-generation\", model=\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoImageProcessor, AutoModel\n \n processor = AutoImageProcessor.from_pretrained(\"facebook/sam3\")\n model = AutoModel.from_pretrained(\"facebook/sam3\")\n with open('facebook_sam3_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam3_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam3_2.txt|facebook_sam3_2.txt>',\n )\n\n with open('facebook_sam3_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoImageProcessor, AutoModel\n\nprocessor = AutoImageProcessor.from_pretrained(\"facebook/sam3\")\nmodel = AutoModel.from_pretrained(\"facebook/sam3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam3_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam3_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam3_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam3_2.txt"
] | 4.16
|
meituan-longcat/LongCat-Image
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image\", dtype=\"auto\")\n with open('meituan-longcat_LongCat-Image_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Image_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Image_0.txt|meituan-longcat_LongCat-Image_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Image_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Image_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Image_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Image_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Image_0.txt"
] | 0
|
tencent/HunyuanOCR
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"tencent/HunyuanOCR\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('tencent_HunyuanOCR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tencent_HunyuanOCR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tencent_HunyuanOCR_0.txt|tencent_HunyuanOCR_0.txt>',\n )\n\n with open('tencent_HunyuanOCR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"tencent/HunyuanOCR\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tencent_HunyuanOCR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tencent_HunyuanOCR_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForSeq2SeqLM\n model = AutoModelForSeq2SeqLM.from_pretrained(\"tencent/HunyuanOCR\", dtype=\"auto\")\n with open('tencent_HunyuanOCR_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tencent_HunyuanOCR_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tencent_HunyuanOCR_1.txt|tencent_HunyuanOCR_1.txt>',\n )\n\n with open('tencent_HunyuanOCR_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForSeq2SeqLM\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"tencent/HunyuanOCR\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tencent_HunyuanOCR_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tencent_HunyuanOCR_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/tencent_HunyuanOCR_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/tencent_HunyuanOCR_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/tencent_HunyuanOCR_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/tencent_HunyuanOCR_1.txt"
] | 2.41
|
Comfy-Org/z_image_turbo
|
[] |
[] |
[] | 0
|
NewBie-AI/NewBie-image-Exp0.1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"NewBie-AI/NewBie-image-Exp0.1\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('NewBie-AI_NewBie-image-Exp0.1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in NewBie-AI_NewBie-image-Exp0.1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/NewBie-AI_NewBie-image-Exp0.1_0.txt|NewBie-AI_NewBie-image-Exp0.1_0.txt>',\n )\n\n with open('NewBie-AI_NewBie-image-Exp0.1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"NewBie-AI/NewBie-image-Exp0.1\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='NewBie-AI_NewBie-image-Exp0.1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='NewBie-AI_NewBie-image-Exp0.1_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/NewBie-AI_NewBie-image-Exp0.1_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/NewBie-AI_NewBie-image-Exp0.1_0.txt"
] | 0
|
Tongyi-MAI/Z-Image-Turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Tongyi-MAI_Z-Image-Turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Tongyi-MAI_Z-Image-Turbo_0.txt|Tongyi-MAI_Z-Image-Turbo_0.txt>',\n )\n\n with open('Tongyi-MAI_Z-Image-Turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Tongyi-MAI_Z-Image-Turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Tongyi-MAI_Z-Image-Turbo_0.txt"
] | 0
|
ostris/Z-Image-De-Turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"ostris/Z-Image-De-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('ostris_Z-Image-De-Turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ostris_Z-Image-De-Turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ostris_Z-Image-De-Turbo_0.txt|ostris_Z-Image-De-Turbo_0.txt>',\n )\n\n with open('ostris_Z-Image-De-Turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"ostris/Z-Image-De-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ostris_Z-Image-De-Turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ostris_Z-Image-De-Turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ostris_Z-Image-De-Turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ostris_Z-Image-De-Turbo_0.txt"
] | 0
|
microsoft/Fara-7B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"microsoft/Fara-7B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('microsoft_Fara-7B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_Fara-7B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_Fara-7B_0.txt|microsoft_Fara-7B_0.txt>',\n )\n\n with open('microsoft_Fara-7B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"microsoft/Fara-7B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_Fara-7B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_Fara-7B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"microsoft/Fara-7B\")\n model = AutoModelForVision2Seq.from_pretrained(\"microsoft/Fara-7B\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('microsoft_Fara-7B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_Fara-7B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_Fara-7B_1.txt|microsoft_Fara-7B_1.txt>',\n )\n\n with open('microsoft_Fara-7B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"microsoft/Fara-7B\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"microsoft/Fara-7B\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_Fara-7B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_Fara-7B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_Fara-7B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_Fara-7B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_Fara-7B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_Fara-7B_1.txt"
] | 20.08
|
mistralai/Ministral-3-14B-Reasoning-2512
|
[] |
[] |
[] | 33.77
|
mistralai/Ministral-3-8B-Instruct-2512
|
[] |
[] |
[] | 21.59
|
salakash/SamKash-Tolstoy
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from peft import PeftModel\n from transformers import AutoModelForCausalLM\n \n base_model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\")\n model = PeftModel.from_pretrained(base_model, \"salakash/SamKash-Tolstoy\")\n with open('salakash_SamKash-Tolstoy_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in salakash_SamKash-Tolstoy_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/salakash_SamKash-Tolstoy_0.txt|salakash_SamKash-Tolstoy_0.txt>',\n )\n\n with open('salakash_SamKash-Tolstoy_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom peft import PeftModel\nfrom transformers import AutoModelForCausalLM\n\nbase_model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B\")\nmodel = PeftModel.from_pretrained(base_model, \"salakash/SamKash-Tolstoy\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='salakash_SamKash-Tolstoy_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='salakash_SamKash-Tolstoy_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/salakash_SamKash-Tolstoy_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/salakash_SamKash-Tolstoy_0.txt"
] | 0
|
aquif-ai/aquif-Image-14B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"aquif-ai/aquif-Image-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('aquif-ai_aquif-Image-14B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-Image-14B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-Image-14B_0.txt|aquif-ai_aquif-Image-14B_0.txt>',\n )\n\n with open('aquif-ai_aquif-Image-14B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"aquif-ai/aquif-Image-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-Image-14B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-Image-14B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-Image-14B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-Image-14B_0.txt"
] | 0
|
deepseek-ai/DeepSeek-V3.2-Exp
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt|deepseek-ai_DeepSeek-V3.2-Exp_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"deepseek-ai/DeepSeek-V3.2-Exp\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-V3.2-Exp_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt|deepseek-ai_DeepSeek-V3.2-Exp_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-V3.2-Exp_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"deepseek-ai/DeepSeek-V3.2-Exp\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-V3.2-Exp_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-V3.2-Exp_1.txt"
] | 1659.65
|
arcee-ai/Trinity-Mini
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('arcee-ai_Trinity-Mini_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in arcee-ai_Trinity-Mini_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/arcee-ai_Trinity-Mini_0.txt|arcee-ai_Trinity-Mini_0.txt>',\n )\n\n with open('arcee-ai_Trinity-Mini_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='arcee-ai_Trinity-Mini_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='arcee-ai_Trinity-Mini_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('arcee-ai_Trinity-Mini_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in arcee-ai_Trinity-Mini_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/arcee-ai_Trinity-Mini_1.txt|arcee-ai_Trinity-Mini_1.txt>',\n )\n\n with open('arcee-ai_Trinity-Mini_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"arcee-ai/Trinity-Mini\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='arcee-ai_Trinity-Mini_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='arcee-ai_Trinity-Mini_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/arcee-ai_Trinity-Mini_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/arcee-ai_Trinity-Mini_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/arcee-ai_Trinity-Mini_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/arcee-ai_Trinity-Mini_1.txt"
] | 63.26
|
meituan-longcat/LongCat-Image-Edit
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-image\", model=\"meituan-longcat/LongCat-Image-Edit\")\n with open('meituan-longcat_LongCat-Image-Edit_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Image-Edit_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Image-Edit_0.txt|meituan-longcat_LongCat-Image-Edit_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Image-Edit_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-image\", model=\"meituan-longcat/LongCat-Image-Edit\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Image-Edit_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Image-Edit_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image-Edit\", dtype=\"auto\")\n with open('meituan-longcat_LongCat-Image-Edit_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Image-Edit_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Image-Edit_1.txt|meituan-longcat_LongCat-Image-Edit_1.txt>',\n )\n\n with open('meituan-longcat_LongCat-Image-Edit_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image-Edit\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Image-Edit_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Image-Edit_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Image-Edit_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Image-Edit_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Image-Edit_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Image-Edit_1.txt"
] | 0
|
SeeSee21/Z-Image-Turbo-AIO
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('SeeSee21_Z-Image-Turbo-AIO_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in SeeSee21_Z-Image-Turbo-AIO_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/SeeSee21_Z-Image-Turbo-AIO_0.txt|SeeSee21_Z-Image-Turbo-AIO_0.txt>',\n )\n\n with open('SeeSee21_Z-Image-Turbo-AIO_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='SeeSee21_Z-Image-Turbo-AIO_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='SeeSee21_Z-Image-Turbo-AIO_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
Supertone/supertonic
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Supertone_supertonic_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Supertone_supertonic_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Supertone_supertonic_0.txt|Supertone_supertonic_0.txt>',\n )\n\n with open('Supertone_supertonic_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Supertone_supertonic_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Supertone_supertonic_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
Quark-Vision/Live-Avatar
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('Quark-Vision_Live-Avatar_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Quark-Vision_Live-Avatar_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Quark-Vision_Live-Avatar_0.txt|Quark-Vision_Live-Avatar_0.txt>',\n )\n\n with open('Quark-Vision_Live-Avatar_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Quark-Vision_Live-Avatar_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Quark-Vision_Live-Avatar_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
mistralai/Ministral-3-3B-Reasoning-2512
|
[] |
[] |
[] | 10.3
|
microsoft/VibeVoice-1.5B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-1.5B\")\n with open('microsoft_VibeVoice-1.5B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-1.5B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-1.5B_0.txt|microsoft_VibeVoice-1.5B_0.txt>',\n )\n\n with open('microsoft_VibeVoice-1.5B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"microsoft/VibeVoice-1.5B\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-1.5B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-1.5B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForSeq2SeqLM\n model = AutoModelForSeq2SeqLM.from_pretrained(\"microsoft/VibeVoice-1.5B\", dtype=\"auto\")\n with open('microsoft_VibeVoice-1.5B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in microsoft_VibeVoice-1.5B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/microsoft_VibeVoice-1.5B_1.txt|microsoft_VibeVoice-1.5B_1.txt>',\n )\n\n with open('microsoft_VibeVoice-1.5B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForSeq2SeqLM\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"microsoft/VibeVoice-1.5B\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='microsoft_VibeVoice-1.5B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='microsoft_VibeVoice-1.5B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-1.5B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/microsoft_VibeVoice-1.5B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-1.5B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/microsoft_VibeVoice-1.5B_1.txt"
] | 6.55
|
tencent/HunyuanVideo-1.5
|
[] |
[] |
[] | 0
|
stepfun-ai/Step1X-Edit-v1p2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"stepfun-ai/Step1X-Edit-v1p2\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('stepfun-ai_Step1X-Edit-v1p2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step1X-Edit-v1p2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step1X-Edit-v1p2_0.txt|stepfun-ai_Step1X-Edit-v1p2_0.txt>',\n )\n\n with open('stepfun-ai_Step1X-Edit-v1p2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"stepfun-ai/Step1X-Edit-v1p2\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step1X-Edit-v1p2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step1X-Edit-v1p2_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step1X-Edit-v1p2_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step1X-Edit-v1p2_0.txt"
] | 0
|
moonshotai/Kimi-K2-Thinking
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('moonshotai_Kimi-K2-Thinking_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Thinking_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Thinking_0.txt|moonshotai_Kimi-K2-Thinking_0.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Thinking_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Thinking_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Thinking_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True, dtype=\"auto\")\n with open('moonshotai_Kimi-K2-Thinking_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in moonshotai_Kimi-K2-Thinking_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/moonshotai_Kimi-K2-Thinking_1.txt|moonshotai_Kimi-K2-Thinking_1.txt>',\n )\n\n with open('moonshotai_Kimi-K2-Thinking_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"moonshotai/Kimi-K2-Thinking\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='moonshotai_Kimi-K2-Thinking_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='moonshotai_Kimi-K2-Thinking_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Thinking_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/moonshotai_Kimi-K2-Thinking_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Thinking_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/moonshotai_Kimi-K2-Thinking_1.txt"
] | 0
|
NousResearch/Hermes-4.3-36B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"NousResearch/Hermes-4.3-36B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('NousResearch_Hermes-4.3-36B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in NousResearch_Hermes-4.3-36B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/NousResearch_Hermes-4.3-36B_0.txt|NousResearch_Hermes-4.3-36B_0.txt>',\n )\n\n with open('NousResearch_Hermes-4.3-36B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"NousResearch/Hermes-4.3-36B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='NousResearch_Hermes-4.3-36B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='NousResearch_Hermes-4.3-36B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"NousResearch/Hermes-4.3-36B\")\n model = AutoModelForCausalLM.from_pretrained(\"NousResearch/Hermes-4.3-36B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('NousResearch_Hermes-4.3-36B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in NousResearch_Hermes-4.3-36B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/NousResearch_Hermes-4.3-36B_1.txt|NousResearch_Hermes-4.3-36B_1.txt>',\n )\n\n with open('NousResearch_Hermes-4.3-36B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"NousResearch/Hermes-4.3-36B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"NousResearch/Hermes-4.3-36B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='NousResearch_Hermes-4.3-36B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='NousResearch_Hermes-4.3-36B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/NousResearch_Hermes-4.3-36B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/NousResearch_Hermes-4.3-36B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/NousResearch_Hermes-4.3-36B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/NousResearch_Hermes-4.3-36B_1.txt"
] | 87.54
|
arcee-ai/Trinity-Nano-Preview
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('arcee-ai_Trinity-Nano-Preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in arcee-ai_Trinity-Nano-Preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/arcee-ai_Trinity-Nano-Preview_0.txt|arcee-ai_Trinity-Nano-Preview_0.txt>',\n )\n\n with open('arcee-ai_Trinity-Nano-Preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='arcee-ai_Trinity-Nano-Preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='arcee-ai_Trinity-Nano-Preview_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('arcee-ai_Trinity-Nano-Preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in arcee-ai_Trinity-Nano-Preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/arcee-ai_Trinity-Nano-Preview_1.txt|arcee-ai_Trinity-Nano-Preview_1.txt>',\n )\n\n with open('arcee-ai_Trinity-Nano-Preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"arcee-ai/Trinity-Nano-Preview\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='arcee-ai_Trinity-Nano-Preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='arcee-ai_Trinity-Nano-Preview_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/arcee-ai_Trinity-Nano-Preview_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/arcee-ai_Trinity-Nano-Preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/arcee-ai_Trinity-Nano-Preview_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/arcee-ai_Trinity-Nano-Preview_1.txt"
] | 29.64
|
deepseek-ai/DeepSeek-OCR
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True)\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-OCR_0.txt|deepseek-ai_DeepSeek-OCR_0.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-OCR_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True, dtype=\"auto\")\n with open('deepseek-ai_DeepSeek-OCR_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in deepseek-ai_DeepSeek-OCR_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/deepseek-ai_DeepSeek-OCR_1.txt|deepseek-ai_DeepSeek-OCR_1.txt>',\n )\n\n with open('deepseek-ai_DeepSeek-OCR_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"deepseek-ai/DeepSeek-OCR\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='deepseek-ai_DeepSeek-OCR_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='deepseek-ai_DeepSeek-OCR_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-OCR_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/deepseek-ai_DeepSeek-OCR_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-OCR_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/deepseek-ai_DeepSeek-OCR_1.txt"
] | 8.08
|
stepfun-ai/GELab-Zero-4B-preview
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-to-text\", model=\"stepfun-ai/GELab-Zero-4B-preview\")\n with open('stepfun-ai_GELab-Zero-4B-preview_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_GELab-Zero-4B-preview_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_GELab-Zero-4B-preview_0.txt|stepfun-ai_GELab-Zero-4B-preview_0.txt>',\n )\n\n with open('stepfun-ai_GELab-Zero-4B-preview_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-to-text\", model=\"stepfun-ai/GELab-Zero-4B-preview\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_GELab-Zero-4B-preview_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_GELab-Zero-4B-preview_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\n model = AutoModelForVision2Seq.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\n with open('stepfun-ai_GELab-Zero-4B-preview_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_GELab-Zero-4B-preview_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_GELab-Zero-4B-preview_1.txt|stepfun-ai_GELab-Zero-4B-preview_1.txt>',\n )\n\n with open('stepfun-ai_GELab-Zero-4B-preview_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"stepfun-ai/GELab-Zero-4B-preview\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_GELab-Zero-4B-preview_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_GELab-Zero-4B-preview_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_GELab-Zero-4B-preview_1.txt"
] | 10.75
|
mistralai/Ministral-3-3B-Base-2512
|
[] |
[] |
[] | 10.3
|
ostris/zimage_turbo_training_adapter
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"ostris/zimage_turbo_training_adapter\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('ostris_zimage_turbo_training_adapter_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ostris_zimage_turbo_training_adapter_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ostris_zimage_turbo_training_adapter_0.txt|ostris_zimage_turbo_training_adapter_0.txt>',\n )\n\n with open('ostris_zimage_turbo_training_adapter_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"ostris/zimage_turbo_training_adapter\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ostris_zimage_turbo_training_adapter_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ostris_zimage_turbo_training_adapter_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ostris_zimage_turbo_training_adapter_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ostris_zimage_turbo_training_adapter_0.txt"
] | 0
|
tewea/z_image_turbo_bf16_nsfw
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('tewea_z_image_turbo_bf16_nsfw_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in tewea_z_image_turbo_bf16_nsfw_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/tewea_z_image_turbo_bf16_nsfw_0.txt|tewea_z_image_turbo_bf16_nsfw_0.txt>',\n )\n\n with open('tewea_z_image_turbo_bf16_nsfw_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='tewea_z_image_turbo_bf16_nsfw_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='tewea_z_image_turbo_bf16_nsfw_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
malcolmrey/zimage
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('malcolmrey_zimage_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in malcolmrey_zimage_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/malcolmrey_zimage_0.txt|malcolmrey_zimage_0.txt>',\n )\n\n with open('malcolmrey_zimage_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='malcolmrey_zimage_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='malcolmrey_zimage_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
mistralai/Ministral-3-14B-Base-2512
|
[] |
[] |
[] | 33.77
|
Phr00t/Qwen-Image-Edit-Rapid-AIO
|
[] |
[] |
[] | 0
|
Qwen/Qwen-Image-Edit-2509
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Turn this cat into a dog\"\n input_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n \n image = pipe(image=input_image, prompt=prompt).images[0]\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image-Edit-2509_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image-Edit-2509_0.txt|Qwen_Qwen-Image-Edit-2509_0.txt>',\n )\n\n with open('Qwen_Qwen-Image-Edit-2509_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image-Edit-2509\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Turn this cat into a dog\"\ninput_image = load_image(\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cat.png\")\n\nimage = pipe(image=input_image, prompt=prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image-Edit-2509_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image-Edit-2509_0.txt"
] | 0
|
stepfun-ai/Step-Audio-R1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('stepfun-ai_Step-Audio-R1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step-Audio-R1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step-Audio-R1_0.txt|stepfun-ai_Step-Audio-R1_0.txt>',\n )\n\n with open('stepfun-ai_Step-Audio-R1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step-Audio-R1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step-Audio-R1_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModelForCausalLM\n model = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-R1\", trust_remote_code=True, dtype=\"auto\")\n with open('stepfun-ai_Step-Audio-R1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stepfun-ai_Step-Audio-R1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stepfun-ai_Step-Audio-R1_1.txt|stepfun-ai_Step-Audio-R1_1.txt>',\n )\n\n with open('stepfun-ai_Step-Audio-R1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModelForCausalLM\nmodel = AutoModelForCausalLM.from_pretrained(\"stepfun-ai/Step-Audio-R1\", trust_remote_code=True, dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stepfun-ai_Step-Audio-R1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stepfun-ai_Step-Audio-R1_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step-Audio-R1_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stepfun-ai_Step-Audio-R1_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step-Audio-R1_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stepfun-ai_Step-Audio-R1_1.txt"
] | 81.09
|
Qwen/Qwen3-VL-8B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_0.txt|Qwen_Qwen3-VL-8B-Instruct_0.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n model = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-VL-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-VL-8B-Instruct_1.txt|Qwen_Qwen3-VL-8B-Instruct_1.txt>',\n )\n\n with open('Qwen_Qwen3-VL-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"Qwen/Qwen3-VL-8B-Instruct\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-VL-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-VL-8B-Instruct_1.txt"
] | 21.23
|
mistralai/Mistral-Large-3-675B-Instruct-2512-NVFP4
|
[] |
[] |
[] | 0
|
renderartist/Technically-Color-Z-Image-Turbo
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.load_lora_weights(\"renderartist/Technically-Color-Z-Image-Turbo\")\n \n prompt = \"t3chnic4lly vibrant 1960s close-up of a woman sitting under a tree in a blue skit and white blouse, she has blonde wavy short hair and a smile with green eyes lake scene by a garden with flowers in the foreground 1960s styl;e film She's holding her hand out there is a small smooth frog in her palm, she's making eye contact with the toad.\"\n image = pipe(prompt).images[0]\n with open('renderartist_Technically-Color-Z-Image-Turbo_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in renderartist_Technically-Color-Z-Image-Turbo_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/renderartist_Technically-Color-Z-Image-Turbo_0.txt|renderartist_Technically-Color-Z-Image-Turbo_0.txt>',\n )\n\n with open('renderartist_Technically-Color-Z-Image-Turbo_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Tongyi-MAI/Z-Image-Turbo\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.load_lora_weights(\"renderartist/Technically-Color-Z-Image-Turbo\")\n\nprompt = \"t3chnic4lly vibrant 1960s close-up of a woman sitting under a tree in a blue skit and white blouse, she has blonde wavy short hair and a smile with green eyes lake scene by a garden with flowers in the foreground 1960s styl;e film She's holding her hand out there is a small smooth frog in her palm, she's making eye contact with the toad.\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='renderartist_Technically-Color-Z-Image-Turbo_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='renderartist_Technically-Color-Z-Image-Turbo_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/renderartist_Technically-Color-Z-Image-Turbo_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/renderartist_Technically-Color-Z-Image-Turbo_0.txt"
] | 0
|
lovis93/Flux-2-Multi-Angles-LoRA-v2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt|lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt>',\n )\n\n with open('lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='lovis93_Flux-2-Multi-Angles-LoRA-v2_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
open-thoughts/OpenThinker-Agent-v1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"open-thoughts/OpenThinker-Agent-v1\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('open-thoughts_OpenThinker-Agent-v1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in open-thoughts_OpenThinker-Agent-v1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/open-thoughts_OpenThinker-Agent-v1_0.txt|open-thoughts_OpenThinker-Agent-v1_0.txt>',\n )\n\n with open('open-thoughts_OpenThinker-Agent-v1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"open-thoughts/OpenThinker-Agent-v1\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='open-thoughts_OpenThinker-Agent-v1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='open-thoughts_OpenThinker-Agent-v1_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"open-thoughts/OpenThinker-Agent-v1\")\n model = AutoModelForCausalLM.from_pretrained(\"open-thoughts/OpenThinker-Agent-v1\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('open-thoughts_OpenThinker-Agent-v1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in open-thoughts_OpenThinker-Agent-v1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/open-thoughts_OpenThinker-Agent-v1_1.txt|open-thoughts_OpenThinker-Agent-v1_1.txt>',\n )\n\n with open('open-thoughts_OpenThinker-Agent-v1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"open-thoughts/OpenThinker-Agent-v1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"open-thoughts/OpenThinker-Agent-v1\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='open-thoughts_OpenThinker-Agent-v1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='open-thoughts_OpenThinker-Agent-v1_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/open-thoughts_OpenThinker-Agent-v1_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/open-thoughts_OpenThinker-Agent-v1_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/open-thoughts_OpenThinker-Agent-v1_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/open-thoughts_OpenThinker-Agent-v1_1.txt"
] | 39.67
|
dx8152/Qwen-Edit-2509-Light-Migration
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('dx8152_Qwen-Edit-2509-Light-Migration_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in dx8152_Qwen-Edit-2509-Light-Migration_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/dx8152_Qwen-Edit-2509-Light-Migration_0.txt|dx8152_Qwen-Edit-2509-Light-Migration_0.txt>',\n )\n\n with open('dx8152_Qwen-Edit-2509-Light-Migration_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='dx8152_Qwen-Edit-2509-Light-Migration_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='dx8152_Qwen-Edit-2509-Light-Migration_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
openbmb/VoxCPM1.5
|
[] |
[] |
[] | 0
|
Kijai/WanVideo_comfy_fp8_scaled
|
[] |
[] |
[] | 0
|
facebook/sam-3d-objects
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('facebook_sam-3d-objects_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-3d-objects_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-3d-objects_0.txt|facebook_sam-3d-objects_0.txt>',\n )\n\n with open('facebook_sam-3d-objects_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-3d-objects_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-3d-objects_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('facebook_sam-3d-objects_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in facebook_sam-3d-objects_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/facebook_sam-3d-objects_1.txt|facebook_sam-3d-objects_1.txt>',\n )\n\n with open('facebook_sam-3d-objects_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='facebook_sam-3d-objects_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='facebook_sam-3d-objects_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/facebook_sam-3d-objects_0.py",
"DO NOT EXECUTE"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/facebook_sam-3d-objects_0.txt",
"WAS NOT EXECUTED"
] | 0
|
mistralai/Ministral-3-8B-Base-2512
|
[] |
[] |
[] | 21.59
|
mistralai/Mistral-Large-3-675B-Base-2512
|
[] |
[] |
[] | 0
|
nari-labs/Dia2-2B
|
[] |
[] |
[] | 0
|
meta-llama/Llama-3.1-8B-Instruct
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_0.txt|meta-llama_Llama-3.1-8B-Instruct_0.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_1.txt|meta-llama_Llama-3.1-8B-Instruct_1.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n model = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meta-llama_Llama-3.1-8B-Instruct_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meta-llama_Llama-3.1-8B-Instruct_2.txt|meta-llama_Llama-3.1-8B-Instruct_2.txt>',\n )\n\n with open('meta-llama_Llama-3.1-8B-Instruct_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmodel = AutoModelForCausalLM.from_pretrained(\"meta-llama/Llama-3.1-8B-Instruct\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meta-llama_Llama-3.1-8B-Instruct_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meta-llama_Llama-3.1-8B-Instruct_2.txt"
] | 19.44
|
Kijai/WanVideo_comfy
|
[] |
[] |
[] | 0
|
zai-org/GLM-4.6
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('zai-org_GLM-4.6_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_0.txt|zai-org_GLM-4.6_0.txt>',\n )\n\n with open('zai-org_GLM-4.6_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\n model = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('zai-org_GLM-4.6_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in zai-org_GLM-4.6_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/zai-org_GLM-4.6_1.txt|zai-org_GLM-4.6_1.txt>',\n )\n\n with open('zai-org_GLM-4.6_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"zai-org/GLM-4.6\")\nmodel = AutoModelForCausalLM.from_pretrained(\"zai-org/GLM-4.6\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='zai-org_GLM-4.6_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='zai-org_GLM-4.6_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/zai-org_GLM-4.6_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/zai-org_GLM-4.6_1.txt"
] | 863.94
|
nvidia/Alpamayo-R1-10B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('nvidia_Alpamayo-R1-10B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in nvidia_Alpamayo-R1-10B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/nvidia_Alpamayo-R1-10B_0.txt|nvidia_Alpamayo-R1-10B_0.txt>',\n )\n\n with open('nvidia_Alpamayo-R1-10B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='nvidia_Alpamayo-R1-10B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='nvidia_Alpamayo-R1-10B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 26.83
|
oumoumad/Qwen-Edit-2509-Material-transfer
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('oumoumad_Qwen-Edit-2509-Material-transfer_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in oumoumad_Qwen-Edit-2509-Material-transfer_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/oumoumad_Qwen-Edit-2509-Material-transfer_0.txt|oumoumad_Qwen-Edit-2509-Material-transfer_0.txt>',\n )\n\n with open('oumoumad_Qwen-Edit-2509-Material-transfer_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='oumoumad_Qwen-Edit-2509-Material-transfer_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='oumoumad_Qwen-Edit-2509-Material-transfer_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
pipecat-ai/smart-turn-v3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('pipecat-ai_smart-turn-v3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in pipecat-ai_smart-turn-v3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/pipecat-ai_smart-turn-v3_0.txt|pipecat-ai_smart-turn-v3_0.txt>',\n )\n\n with open('pipecat-ai_smart-turn-v3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='pipecat-ai_smart-turn-v3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='pipecat-ai_smart-turn-v3_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 0
|
MCG-NJU/SteadyDancer-14B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n from diffusers.utils import load_image, export_to_video\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"MCG-NJU/SteadyDancer-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\n pipe.to(\"cuda\")\n \n prompt = \"A man with short gray hair plays a red electric guitar.\"\n image = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n )\n \n output = pipe(image=image, prompt=prompt).frames[0]\n export_to_video(output, \"output.mp4\")\n with open('MCG-NJU_SteadyDancer-14B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in MCG-NJU_SteadyDancer-14B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/MCG-NJU_SteadyDancer-14B_0.txt|MCG-NJU_SteadyDancer-14B_0.txt>',\n )\n\n with open('MCG-NJU_SteadyDancer-14B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\nfrom diffusers.utils import load_image, export_to_video\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"MCG-NJU/SteadyDancer-14B\", dtype=torch.bfloat16, device_map=\"cuda\")\npipe.to(\"cuda\")\n\nprompt = \"A man with short gray hair plays a red electric guitar.\"\nimage = load_image(\n \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png\"\n)\n\noutput = pipe(image=image, prompt=prompt).frames[0]\nexport_to_video(output, \"output.mp4\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='MCG-NJU_SteadyDancer-14B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='MCG-NJU_SteadyDancer-14B_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/MCG-NJU_SteadyDancer-14B_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/MCG-NJU_SteadyDancer-14B_0.txt"
] | 0
|
PrimeIntellect/INTELLECT-3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('PrimeIntellect_INTELLECT-3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PrimeIntellect_INTELLECT-3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PrimeIntellect_INTELLECT-3_0.txt|PrimeIntellect_INTELLECT-3_0.txt>',\n )\n\n with open('PrimeIntellect_INTELLECT-3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PrimeIntellect_INTELLECT-3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PrimeIntellect_INTELLECT-3_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\n model = AutoModelForCausalLM.from_pretrained(\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('PrimeIntellect_INTELLECT-3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in PrimeIntellect_INTELLECT-3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/PrimeIntellect_INTELLECT-3_1.txt|PrimeIntellect_INTELLECT-3_1.txt>',\n )\n\n with open('PrimeIntellect_INTELLECT-3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"PrimeIntellect/INTELLECT-3\", trust_remote_code=True)\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='PrimeIntellect_INTELLECT-3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='PrimeIntellect_INTELLECT-3_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PrimeIntellect_INTELLECT-3_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/PrimeIntellect_INTELLECT-3_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PrimeIntellect_INTELLECT-3_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/PrimeIntellect_INTELLECT-3_1.txt"
] | 517.47
|
Comfy-Org/Ovis-Image
|
[] |
[] |
[] | 0
|
sentence-transformers/all-MiniLM-L6-v2
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in sentence-transformers_all-MiniLM-L6-v2_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/sentence-transformers_all-MiniLM-L6-v2_0.txt|sentence-transformers_all-MiniLM-L6-v2_0.txt>',\n )\n\n with open('sentence-transformers_all-MiniLM-L6-v2_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"sentence-transformers/all-MiniLM-L6-v2\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='sentence-transformers_all-MiniLM-L6-v2_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/sentence-transformers_all-MiniLM-L6-v2_0.txt"
] | 0.11
|
openai/whisper-large-v3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_0.txt|openai_whisper-large-v3_0.txt>',\n )\n\n with open('openai_whisper-large-v3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n \n processor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\n model = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n with open('openai_whisper-large-v3_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_whisper-large-v3_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_whisper-large-v3_1.txt|openai_whisper-large-v3_1.txt>',\n )\n\n with open('openai_whisper-large-v3_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForSpeechSeq2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"openai/whisper-large-v3\")\nmodel = AutoModelForSpeechSeq2Seq.from_pretrained(\"openai/whisper-large-v3\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_whisper-large-v3_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_whisper-large-v3_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_whisper-large-v3_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_whisper-large-v3_1.txt"
] | 7.47
|
maya-research/maya1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-to-speech\", model=\"maya-research/maya1\")\n with open('maya-research_maya1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in maya-research_maya1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/maya-research_maya1_0.txt|maya-research_maya1_0.txt>',\n )\n\n with open('maya-research_maya1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-to-speech\", model=\"maya-research/maya1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='maya-research_maya1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='maya-research_maya1_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"maya-research/maya1\")\n model = AutoModelForCausalLM.from_pretrained(\"maya-research/maya1\")\n with open('maya-research_maya1_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in maya-research_maya1_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/maya-research_maya1_1.txt|maya-research_maya1_1.txt>',\n )\n\n with open('maya-research_maya1_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"maya-research/maya1\")\nmodel = AutoModelForCausalLM.from_pretrained(\"maya-research/maya1\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='maya-research_maya1_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='maya-research_maya1_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/maya-research_maya1_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/maya-research_maya1_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/maya-research_maya1_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/maya-research_maya1_1.txt"
] | 7.99
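
Note: the maya1 row's first snippet builds a text-to-speech pipeline but never consumes its output. A hedged sketch of doing so follows; it assumes the generic text-to-speech pipeline contract (`{"audio": ndarray, "sampling_rate": int}`) and uses `soundfile`, which is not in the script headers above, so treat it as illustrative only.

```python
# Hedged sketch: consuming a text-to-speech pipeline's output. Assumes the
# generic pipeline contract ({"audio": ndarray, "sampling_rate": int});
# soundfile is an extra dependency not listed in the recorded scripts.
import soundfile as sf
from transformers import pipeline

tts = pipeline("text-to-speech", model="maya-research/maya1")
out = tts("Hello from a text-to-speech sketch.")

# Persist the waveform; out["audio"] may be shaped (1, n), hence squeeze().
sf.write("speech.wav", out["audio"].squeeze(), out["sampling_rate"])
```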
|
mistralai/Mistral-Large-3-675B-Instruct-2512-Eagle
|
[] |
[] |
[] | 0
|
openai/gpt-oss-20b
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('openai_gpt-oss-20b_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_0.txt|openai_gpt-oss-20b_0.txt>',\n )\n\n with open('openai_gpt-oss-20b_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\n model = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('openai_gpt-oss-20b_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in openai_gpt-oss-20b_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/openai_gpt-oss-20b_1.txt|openai_gpt-oss-20b_1.txt>',\n )\n\n with open('openai_gpt-oss-20b_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"openai/gpt-oss-20b\")\nmodel = AutoModelForCausalLM.from_pretrained(\"openai/gpt-oss-20b\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='openai_gpt-oss-20b_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='openai_gpt-oss-20b_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/openai_gpt-oss-20b_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/openai_gpt-oss-20b_1.txt"
] | 52.09
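
Note: the table does not document how the estimated_vram column is computed. A common back-of-envelope estimate is parameter count × bytes per parameter plus a safety margin; the sketch below is that heuristic, not the dataset's actual formula, and the constants are illustrative.

```python
# Hedged sketch of a parameter-count based VRAM estimate. The real formula
# behind the estimated_vram column is not documented here; these numbers
# are illustrative, not the dataset's method.
def estimate_vram_gb(n_params: float, bytes_per_param: int = 2,
                     overhead: float = 1.2) -> float:
    """Rough weights-only estimate: params * dtype size * safety factor."""
    return n_params * bytes_per_param * overhead / 1e9

# ~20B parameters in bf16 (2 bytes each) with 20% overhead:
print(f"{estimate_vram_gb(20e9):.1f} GB")  # ~48.0 GB, same ballpark as 52.09
```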
|
ServiceNow-AI/Apriel-1.6-15b-Thinker
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt|ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt>',\n )\n\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt|ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt>',\n )\n\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForVision2Seq\n \n processor = AutoProcessor.from_pretrained(\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\n model = AutoModelForVision2Seq.from_pretrained(\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt|ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt>',\n )\n\n with open('ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForVision2Seq\n\nprocessor = AutoProcessor.from_pretrained(\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\nmodel = AutoModelForVision2Seq.from_pretrained(\"ServiceNow-AI/Apriel-1.6-15b-Thinker\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/ServiceNow-AI_Apriel-1.6-15b-Thinker_2.txt"
] | 35.99
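
Note: several rows (Apriel, FLUX.1-dev, gemma-3-4b-it) prepend a `login(new_session=False)` step, which reuses cached credentials for gated checkpoints. In an unattended environment that call is typically driven by a token instead of an interactive prompt; a hedged sketch, where `HF_TOKEN` is an assumed environment variable:

```python
# Hedged sketch: non-interactive authentication for gated checkpoints.
# HF_TOKEN is an assumed environment variable; login(new_session=False),
# as used in the recorded scripts, reuses a cached token when one exists.
import os
from huggingface_hub import login

token = os.environ.get("HF_TOKEN")
if token:
    login(token=token)        # explicit token, no prompt
else:
    login(new_session=False)  # fall back to any cached credentials
```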
|
T5B/Z-Image-Turbo-FP8
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"T5B/Z-Image-Turbo-FP8\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('T5B_Z-Image-Turbo-FP8_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in T5B_Z-Image-Turbo-FP8_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/T5B_Z-Image-Turbo-FP8_0.txt|T5B_Z-Image-Turbo-FP8_0.txt>',\n )\n\n with open('T5B_Z-Image-Turbo-FP8_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"T5B/Z-Image-Turbo-FP8\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='T5B_Z-Image-Turbo-FP8_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='T5B_Z-Image-Turbo-FP8_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/T5B_Z-Image-Turbo-FP8_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/T5B_Z-Image-Turbo-FP8_0.txt"
] | 0
|
aquif-ai/aquif-3.5-Max-1205
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Max-1205\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('aquif-ai_aquif-3.5-Max-1205_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Max-1205_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Max-1205_0.txt|aquif-ai_aquif-3.5-Max-1205_0.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Max-1205_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"aquif-ai/aquif-3.5-Max-1205\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Max-1205_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Max-1205_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Max-1205\")\n model = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Max-1205\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('aquif-ai_aquif-3.5-Max-1205_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in aquif-ai_aquif-3.5-Max-1205_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Max-1205_1.txt|aquif-ai_aquif-3.5-Max-1205_1.txt>',\n )\n\n with open('aquif-ai_aquif-3.5-Max-1205_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"aquif-ai/aquif-3.5-Max-1205\")\nmodel = AutoModelForCausalLM.from_pretrained(\"aquif-ai/aquif-3.5-Max-1205\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='aquif-ai_aquif-3.5-Max-1205_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='aquif-ai_aquif-3.5-Max-1205_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Max-1205_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/aquif-ai_aquif-3.5-Max-1205_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Max-1205_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/aquif-ai_aquif-3.5-Max-1205_1.txt"
] | 102.6
|
BAAI/bge-m3
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from sentence_transformers import SentenceTransformer\n \n model = SentenceTransformer(\"BAAI/bge-m3\")\n \n sentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n ]\n embeddings = model.encode(sentences)\n \n similarities = model.similarity(embeddings, embeddings)\n print(similarities.shape)\n # [4, 4]\n with open('BAAI_bge-m3_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in BAAI_bge-m3_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/BAAI_bge-m3_0.txt|BAAI_bge-m3_0.txt>',\n )\n\n with open('BAAI_bge-m3_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom sentence_transformers import SentenceTransformer\n\nmodel = SentenceTransformer(\"BAAI/bge-m3\")\n\nsentences = [\n \"That is a happy person\",\n \"That is a happy dog\",\n \"That is a very happy person\",\n \"Today is a sunny day\"\n]\nembeddings = model.encode(sentences)\n\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [4, 4]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='BAAI_bge-m3_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='BAAI_bge-m3_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/BAAI_bge-m3_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/BAAI_bge-m3_0.txt"
] | 0
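
Note: `SentenceTransformer.similarity` in the bge-m3 snippet defaults to cosine similarity, so the printed `[4, 4]` matrix can be reproduced with plain numpy. A hedged equivalent, using random vectors as a stand-in for `model.encode(...)` (bge-m3 actually emits 1024-dimensional embeddings):

```python
# Hedged sketch: SentenceTransformer.similarity defaults to cosine
# similarity; the same matrix shape can be reproduced with plain numpy.
import numpy as np

def cosine_matrix(embeddings: np.ndarray) -> np.ndarray:
    normed = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    return normed @ normed.T

vecs = np.random.rand(4, 1024).astype(np.float32)  # stand-in for model.encode(...)
print(cosine_matrix(vecs).shape)  # (4, 4), matching the snippet's output
```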
|
EssentialAI/rnj-1
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # ⚠️ Type of model/library unknown.\n \n # Feel free to open a Pull request \n # for integration of the huggingface model hub\n # into the corresponding library =)\n with open('EssentialAI_rnj-1_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in EssentialAI_rnj-1_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/EssentialAI_rnj-1_0.txt|EssentialAI_rnj-1_0.txt>',\n )\n\n with open('EssentialAI_rnj-1_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# ⚠️ Type of model/library unknown.\n \n# Feel free to open a Pull request \n# for integration of the huggingface model hub\n# into the corresponding library =)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='EssentialAI_rnj-1_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='EssentialAI_rnj-1_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"DO NOT EXECUTE"
] |
[
"WAS NOT EXECUTED"
] | 40.25
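
Note: rows whose model/library type could not be identified (such as EssentialAI/rnj-1 above) carry the sentinels "DO NOT EXECUTE" and "WAS NOT EXECUTED" instead of file URLs. When consuming this split programmatically, such rows are easy to filter out; a hedged sketch, where the column names come from the table header but the local parquet path is hypothetical:

```python
# Hedged sketch: skipping non-executable rows when consuming this table.
# Column names match the table header; "train.parquet" is a hypothetical
# local export of this split.
import pandas as pd

df = pd.read_parquet("train.parquet")
runnable = df[df["code_urls"].apply(lambda urls: "DO NOT EXECUTE" not in list(urls))]
print(len(runnable), "of", len(df), "rows have executable snippets")
```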
|
mistralai/Ministral-3-8B-Reasoning-2512
|
[] |
[] |
[] | 21.59
|
black-forest-labs/FLUX.1-dev
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_0.txt|black-forest-labs_FLUX.1-dev_0.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in black-forest-labs_FLUX.1-dev_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/black-forest-labs_FLUX.1-dev_1.txt|black-forest-labs_FLUX.1-dev_1.txt>',\n )\n\n with open('black-forest-labs_FLUX.1-dev_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"black-forest-labs/FLUX.1-dev\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='black-forest-labs_FLUX.1-dev_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='black-forest-labs_FLUX.1-dev_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/black-forest-labs_FLUX.1-dev_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/black-forest-labs_FLUX.1-dev_1.txt"
] | 0
|
Qwen/Qwen-Image
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Qwen_Qwen-Image_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen-Image_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen-Image_0.txt|Qwen_Qwen-Image_0.txt>',\n )\n\n with open('Qwen_Qwen-Image_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Qwen/Qwen-Image\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen-Image_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen-Image_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen-Image_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen-Image_0.txt"
] | 0
|
Anzhc/Z-Image_Anime_VAE
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"Anzhc/Z-Image_Anime_VAE\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('Anzhc_Z-Image_Anime_VAE_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Anzhc_Z-Image_Anime_VAE_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Anzhc_Z-Image_Anime_VAE_0.txt|Anzhc_Z-Image_Anime_VAE_0.txt>',\n )\n\n with open('Anzhc_Z-Image_Anime_VAE_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"Anzhc/Z-Image_Anime_VAE\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Anzhc_Z-Image_Anime_VAE_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Anzhc_Z-Image_Anime_VAE_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Anzhc_Z-Image_Anime_VAE_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Anzhc_Z-Image_Anime_VAE_0.txt"
] | 0
|
Qwen/Qwen3-0.6B
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n pipe(messages)\n with open('Qwen_Qwen3-0.6B_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_0.txt|Qwen_Qwen3-0.6B_0.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"text-generation\", model=\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\npipe(messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoTokenizer, AutoModelForCausalLM\n \n tokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\n model = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\n messages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n ]\n inputs = tokenizer.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('Qwen_Qwen3-0.6B_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in Qwen_Qwen3-0.6B_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/Qwen_Qwen3-0.6B_1.txt|Qwen_Qwen3-0.6B_1.txt>',\n )\n\n with open('Qwen_Qwen3-0.6B_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\ntokenizer = AutoTokenizer.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmodel = AutoModelForCausalLM.from_pretrained(\"Qwen/Qwen3-0.6B\")\nmessages = [\n {\"role\": \"user\", \"content\": \"Who are you?\"},\n]\ninputs = tokenizer.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='Qwen_Qwen3-0.6B_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='Qwen_Qwen3-0.6B_1.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/Qwen_Qwen3-0.6B_1.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/Qwen_Qwen3-0.6B_1.txt"
] | 1.82
|
Phr00t/WAN2.2-14B-Rapid-AllInOne
|
[] |
[] |
[] | 0
|
google/gemma-3-4b-it
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n from huggingface_hub import login\n login(new_session=False)\n with open('google_gemma-3-4b-it_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-4b-it_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-4b-it_0.txt|google_gemma-3-4b-it_0.txt>',\n )\n\n with open('google_gemma-3-4b-it_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nfrom huggingface_hub import login\nlogin(new_session=False)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-4b-it_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-4b-it_0.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Use a pipeline as a high-level helper\n from transformers import pipeline\n \n pipe = pipeline(\"image-text-to-text\", model=\"google/gemma-3-4b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n pipe(text=messages)\n with open('google_gemma-3-4b-it_1.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-4b-it_1.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-4b-it_1.txt|google_gemma-3-4b-it_1.txt>',\n )\n\n with open('google_gemma-3-4b-it_1.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Use a pipeline as a high-level helper\nfrom transformers import pipeline\n\npipe = pipeline(\"image-text-to-text\", model=\"google/gemma-3-4b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\npipe(text=messages)\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-4b-it_1.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-4b-it_1.txt',\n repo_type='dataset',\n )\n",
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoProcessor, AutoModelForMultimodalLM\n \n processor = AutoProcessor.from_pretrained(\"google/gemma-3-4b-it\")\n model = AutoModelForMultimodalLM.from_pretrained(\"google/gemma-3-4b-it\")\n messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n ]\n inputs = processor.apply_chat_template(\n \tmessages,\n \tadd_generation_prompt=True,\n \ttokenize=True,\n \treturn_dict=True,\n \treturn_tensors=\"pt\",\n ).to(model.device)\n \n outputs = model.generate(**inputs, max_new_tokens=40)\n print(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n with open('google_gemma-3-4b-it_2.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in google_gemma-3-4b-it_2.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/google_gemma-3-4b-it_2.txt|google_gemma-3-4b-it_2.txt>',\n )\n\n with open('google_gemma-3-4b-it_2.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoProcessor, AutoModelForMultimodalLM\n\nprocessor = AutoProcessor.from_pretrained(\"google/gemma-3-4b-it\")\nmodel = AutoModelForMultimodalLM.from_pretrained(\"google/gemma-3-4b-it\")\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG\"},\n {\"type\": \"text\", \"text\": \"What animal is on the candy?\"}\n ]\n },\n]\ninputs = processor.apply_chat_template(\n\tmessages,\n\tadd_generation_prompt=True,\n\ttokenize=True,\n\treturn_dict=True,\n\treturn_tensors=\"pt\",\n).to(model.device)\n\noutputs = model.generate(**inputs, max_new_tokens=40)\nprint(processor.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:]))\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='google_gemma-3-4b-it_2.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='google_gemma-3-4b-it_2.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-4b-it_0.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-4b-it_1.py",
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/google_gemma-3-4b-it_2.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-4b-it_0.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-4b-it_1.txt",
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/google_gemma-3-4b-it_2.txt"
] | 10.41
|
meituan-longcat/LongCat-Image-Dev
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n # Load model directly\n from transformers import AutoModel\n model = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image-Dev\", dtype=\"auto\")\n with open('meituan-longcat_LongCat-Image-Dev_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in meituan-longcat_LongCat-Image-Dev_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/meituan-longcat_LongCat-Image-Dev_0.txt|meituan-longcat_LongCat-Image-Dev_0.txt>',\n )\n\n with open('meituan-longcat_LongCat-Image-Dev_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \n# Load model directly\nfrom transformers import AutoModel\nmodel = AutoModel.from_pretrained(\"meituan-longcat/LongCat-Image-Dev\", dtype=\"auto\")\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='meituan-longcat_LongCat-Image-Dev_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='meituan-longcat_LongCat-Image-Dev_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/meituan-longcat_LongCat-Image-Dev_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/meituan-longcat_LongCat-Image-Dev_0.txt"
] | 0
|
stabilityai/stable-diffusion-xl-base-1.0
|
[
"# /// script\n# requires-python = \">=3.12\"\n# dependencies = [\n# \"numpy\",\n# \"einops\",\n# \"pandas\",\n# \"matplotlib\",\n# \"protobuf\",\n# \"torch\",\n# \"sentencepiece\",\n# \"torchvision\",\n# \"transformers\",\n# \"timm\",\n# \"diffusers\",\n# \"sentence-transformers\",\n# \"accelerate\",\n# \"peft\",\n# \"slack-sdk\",\n# ]\n# ///\n\ntry:\n import torch\n from diffusers import DiffusionPipeline\n \n # switch to \"mps\" for apple devices\n pipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\", dtype=torch.bfloat16, device_map=\"cuda\")\n \n prompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\n image = pipe(prompt).images[0]\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'w', encoding='utf-8') as f:\n f.write('Everything was good in stabilityai_stable-diffusion-xl-base-1.0_0.txt')\nexcept Exception as e:\n import os\n from slack_sdk import WebClient\n client = WebClient(token=os.environ['SLACK_TOKEN'])\n client.chat_postMessage(\n channel='#hub-model-metadata-snippets-sprint',\n text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt|stabilityai_stable-diffusion-xl-base-1.0_0.txt>',\n )\n\n with open('stabilityai_stable-diffusion-xl-base-1.0_0.txt', 'a', encoding='utf-8') as f:\n import traceback\n f.write('''```CODE: \nimport torch\nfrom diffusers import DiffusionPipeline\n\n# switch to \"mps\" for apple devices\npipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-xl-base-1.0\", dtype=torch.bfloat16, device_map=\"cuda\")\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nimage = pipe(prompt).images[0]\n```\n\nERROR: \n''')\n traceback.print_exc(file=f)\n \nfinally:\n from huggingface_hub import upload_file\n upload_file(\n path_or_fileobj='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_id='model-metadata/code_execution_files',\n path_in_repo='stabilityai_stable-diffusion-xl-base-1.0_0.txt',\n repo_type='dataset',\n )\n"
] |
[
"https://huggingface.co/datasets/model-metadata/code_python_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.py"
] |
[
"https://huggingface.co/datasets/model-metadata/code_execution_files/raw/main/stabilityai_stable-diffusion-xl-base-1.0_0.txt"
] | 0
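
Note: the diffusers snippets above hard-code `device_map="cuda"` with a comment to switch to "mps" on Apple hardware. A hedged sketch of picking the device at runtime instead, shown here against the SDXL checkpoint from the row above; this is one reasonable pattern, not the recorded scripts' method.

```python
# Hedged sketch: runtime device selection instead of hard-coding
# device_map="cuda" as in the recorded snippets.
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else (
    "mps" if torch.backends.mps.is_available() else "cpu"
)
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.bfloat16,
).to(device)

image = pipe("Astronaut in a jungle, cold color palette, detailed, 8k").images[0]
image.save("astronaut.png")
```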
|