ariG23498 HF Staff committed on
Commit
edfb747
·
verified ·
1 Parent(s): f8b3770

Upload ArliAI_gpt-oss-20b-Derestricted_1.py with huggingface_hub

Browse files
ArliAI_gpt-oss-20b-Derestricted_1.py CHANGED
@@ -25,6 +25,19 @@ try:
25
 
26
  tokenizer = AutoTokenizer.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
27
  model = AutoModelForCausalLM.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  with open('ArliAI_gpt-oss-20b-Derestricted_1.txt', 'w', encoding='utf-8') as f:
29
  f.write('Everything was good in ArliAI_gpt-oss-20b-Derestricted_1.txt')
30
  except Exception as e:
@@ -44,6 +57,19 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
44
 
45
  tokenizer = AutoTokenizer.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
46
  model = AutoModelForCausalLM.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  ```
48
 
49
  ERROR:
 
25
 
26
  tokenizer = AutoTokenizer.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
27
  model = AutoModelForCausalLM.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
28
+ messages = [
29
+ {"role": "user", "content": "Who are you?"},
30
+ ]
31
+ inputs = tokenizer.apply_chat_template(
32
+ messages,
33
+ add_generation_prompt=True,
34
+ tokenize=True,
35
+ return_dict=True,
36
+ return_tensors="pt",
37
+ ).to(model.device)
38
+
39
+ outputs = model.generate(**inputs, max_new_tokens=40)
40
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
41
  with open('ArliAI_gpt-oss-20b-Derestricted_1.txt', 'w', encoding='utf-8') as f:
42
  f.write('Everything was good in ArliAI_gpt-oss-20b-Derestricted_1.txt')
43
  except Exception as e:
 
57
 
58
  tokenizer = AutoTokenizer.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
59
  model = AutoModelForCausalLM.from_pretrained("ArliAI/gpt-oss-20b-Derestricted")
60
+ messages = [
61
+ {"role": "user", "content": "Who are you?"},
62
+ ]
63
+ inputs = tokenizer.apply_chat_template(
64
+ messages,
65
+ add_generation_prompt=True,
66
+ tokenize=True,
67
+ return_dict=True,
68
+ return_tensors="pt",
69
+ ).to(model.device)
70
+
71
+ outputs = model.generate(**inputs, max_new_tokens=40)
72
+ print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
73
  ```
74
 
75
  ERROR: