Joe6636564 committed
Commit 09a019f · verified · 1 parent: 42a1def

Update app.py

Files changed (1):
  1. app.py (+6 −3)
app.py CHANGED
@@ -94,7 +94,8 @@ def chat_simple(message, history):
         max_new_tokens=256,
         pad_token_id=tokenizer.pad_token_id,
         do_sample=False,
-        temperature=0.7
+        temperature=0.7,
+        use_cache=False
     )
 
     reply = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
@@ -136,7 +137,8 @@ def run_vision(image, text_input, model_id):
         max_new_tokens=400,
         do_sample=False,
         pad_token_id=processor.tokenizer.pad_token_id or processor.tokenizer.eos_token_id,
-        temperature=0.7
+        temperature=0.7,
+        use_cache=False
     )
 
     output = output[:, inputs["input_ids"].shape[1]:]
@@ -178,7 +180,8 @@ async def api_chat(message: str = Form(...)):
     output = model.generate(
         input_ids,
         max_new_tokens=256,
-        pad_token_id=tokenizer.pad_token_id
+        pad_token_id=tokenizer.pad_token_id,
+        use_cache=False
     )
 
     reply = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
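
For context, the commit adds `use_cache=False` to each `generate()` call while keeping the existing greedy-decoding settings. Below is a minimal, self-contained sketch of what the updated text-chat call looks like; the model name, prompt, and pad-token fallback are placeholders for illustration only, since the Space's actual model setup is not shown in this diff.

```python
# Sketch of the updated generation call (assumptions: a standard
# Hugging Face transformers causal LM; "gpt2" is only a placeholder).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder, not the Space's actual model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Common fallback when the tokenizer defines no pad token (assumption,
# not part of the commit).
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token

input_ids = tokenizer("Hello, how are you?", return_tensors="pt").input_ids

output = model.generate(
    input_ids,
    max_new_tokens=256,
    pad_token_id=tokenizer.pad_token_id,
    do_sample=False,       # greedy decoding, as in the diff
    temperature=0.7,       # kept by the commit; has no effect when do_sample=False
    use_cache=False,       # added in this commit: disables the KV cache during generation
)

# Decode only the newly generated tokens, mirroring the slicing in app.py.
reply = tokenizer.decode(output[0][input_ids.shape[1]:], skip_special_tokens=True)
print(reply)
```

Disabling the KV cache typically slows generation but reduces memory use and sidesteps cache-related incompatibilities in some model/transformers combinations; the commit message does not state which of these motivated the change.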