ariG23498 HF Staff committed on
Commit
da82b69
·
verified ·
1 Parent(s): 986a1ed

Upload aquif-ai_aquif-3.5-Plus-30B-A3B_1.py with huggingface_hub

Browse files
aquif-ai_aquif-3.5-Plus-30B-A3B_1.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "numpy",
#     "einops",
#     "pandas",
#     "protobuf",
#     "torch",
#     "sentencepiece",
#     "torchvision",
#     "transformers",
#     "timm",
#     "diffusers",
#     "sentence-transformers",
#     "accelerate",
#     "peft",
#     "slack-sdk",
# ]
# ///

# Smoke-test runner: execute the model-card snippet for
# aquif-ai/aquif-3.5-Plus-30B-A3B, record success or the failure traceback in a
# local .txt report, optionally ping Slack on failure, and always upload the
# report to the model-metadata/code_execution_files dataset repo.

REPORT_FILE = 'aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt'

try:
    # Load model directly
    from transformers import AutoTokenizer, AutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("aquif-ai/aquif-3.5-Plus-30B-A3B")
    model = AutoModelForCausalLM.from_pretrained("aquif-ai/aquif-3.5-Plus-30B-A3B")
    messages = [
        {"role": "user", "content": "Who are you?"},
    ]
    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    outputs = model.generate(**inputs, max_new_tokens=40)
    # Decode only the newly generated tokens (skip the prompt prefix).
    print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
    with open(REPORT_FILE, 'w', encoding='utf-8') as f:
        f.write('Everything was good in aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt')
except Exception:
    import traceback

    # Write the failure report FIRST so it exists for the upload in `finally`
    # even if the Slack notification below fails (e.g. SLACK_TOKEN unset or a
    # network error). The original ordering could lose the traceback entirely.
    with open(REPORT_FILE, 'a', encoding='utf-8') as f:
        f.write('''```CODE:
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("aquif-ai/aquif-3.5-Plus-30B-A3B")
model = AutoModelForCausalLM.from_pretrained("aquif-ai/aquif-3.5-Plus-30B-A3B")
messages = [
    {"role": "user", "content": "Who are you?"},
]
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```

ERROR:
''')
        traceback.print_exc(file=f)

    # Best-effort Slack notification: never let a notification failure mask
    # the original error or abort the report upload.
    try:
        import os
        from slack_sdk import WebClient

        client = WebClient(token=os.environ['SLACK_TOKEN'])
        client.chat_postMessage(
            channel='#hub-model-metadata-snippets-sprint',
            text='Problem in <https://huggingface.co/datasets/model-metadata/code_execution_files/blob/main/aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt|aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt>',
        )
    except Exception:
        traceback.print_exc()

finally:
    # Upload whichever report was produced (success marker or traceback).
    from huggingface_hub import upload_file
    upload_file(
        path_or_fileobj='aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt',
        repo_id='model-metadata/code_execution_files',
        path_in_repo='aquif-ai_aquif-3.5-Plus-30B-A3B_1.txt',
        repo_type='dataset',
    )