guanwenyu1995 committed
Commit 7cb5938 · verified · 1 Parent(s): 84b2452

Update README.md

Files changed (1): README.md (+8 −8)
README.md CHANGED
@@ -37,10 +37,10 @@ tokenizer = AutoTokenizer.from_pretrained(
 )
 device = next(model.model.parameters()).device
 
-# if enable_think
-# formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt = True, enable_thinking = True)
-# if disable_think
-formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt = True, enable_thinking = False)
+# if think mode is enabled, use the following code
+formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt = True, enable_thinking = True)
+# if think mode is disabled, use the following code
+# formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt = True, enable_thinking = False)
 
 input_ids = tokenizer.encode(formatted_prompt, return_tensors='pt').to(device)
 outputs = model.generate(
@@ -48,10 +48,10 @@ outputs = model.generate(
     max_new_tokens=1000,
     do_sample=True
 )
-# if enable think
-# ans = [i.split("<|im_start|> assistant\n", 1)[1].strip() for i in tokenizer.batch_decode(outputs)]
-# if disable think
-ans = [i.split("<|im_start|> assistant\n<think>\n\n</think>", 1)[1].strip() for i in tokenizer.batch_decode(outputs)]
+# if think mode is enabled, use the following code
+ans = [i.split("<|im_start|> assistant\n", 1)[1].strip() for i in tokenizer.batch_decode(outputs)]
+# if think mode is disabled, use the following code
+# ans = [i.split("<|im_start|> assistant\n<think>\n\n</think>", 1)[1].strip() for i in tokenizer.batch_decode(outputs)]
 ```
 
 <p align="center">
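
For context, here is a minimal end-to-end sketch of the flow the updated README describes, with think mode enabled (the default this commit establishes). The model id and the example message are hypothetical placeholders, not taken from this repository; the `enable_thinking` flag and the `<|im_start|> assistant` split marker are assumed to match this model's Qwen-style chat template, as the README's own lines suggest.

```python
# Minimal sketch of the updated README flow (think mode enabled).
# Assumptions: MODEL_ID is a hypothetical placeholder, and the chat template
# honors `enable_thinking` and emits "<|im_start|> assistant\n" before the
# reply, as the README's split marker implies.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "your-org/your-model"  # hypothetical placeholder, not the real repo id

model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
device = next(model.parameters()).device  # plain HF model, so no .model indirection

messages = [{"role": "user", "content": "What is 2 + 2?"}]  # example input, not from the README

# Think mode on (the behavior this commit makes the default):
formatted_prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=True
)
# Think mode off would instead pass enable_thinking=False and split on
# "<|im_start|> assistant\n<think>\n\n</think>" when decoding, per the README.

input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt").to(device)
outputs = model.generate(input_ids, max_new_tokens=1000, do_sample=True)

# Recover only the assistant's reply, using the README's own marker.
ans = [
    text.split("<|im_start|> assistant\n", 1)[1].strip()
    for text in tokenizer.batch_decode(outputs)
]
print(ans[0])  # with think mode on, this still contains the <think>...</think> block
```

With think mode on, the decoded text keeps the model's `<think>...</think>` reasoning block; the disable-think variant strips it by splitting past the empty `<think>` tags instead.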