Spaces:
Sleeping
Sleeping
Commit
·
2da383c
1
Parent(s):
da26f47
Update main.py
Browse files
main.py
CHANGED
|
@@ -37,26 +37,28 @@ llms = {
|
|
| 37 |
"suffix": "<|endoftext|><|assistant|>"
|
| 38 |
}
|
| 39 |
}
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
|
| 42 |
#Fast API
|
| 43 |
app = FastAPI()
|
| 44 |
-
|
| 45 |
|
| 46 |
-
@app.post("/llm_on_cpu")
|
| 47 |
-
async def stream(item: dict):
|
| 48 |
|
|
|
|
|
|
|
| 49 |
model = llms[item['llm']]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 50 |
prefix=model['prefix']
|
| 51 |
suffix=model['suffix']
|
| 52 |
-
nctx = item['nctx'] if 'nctx' in item.keys() else model['nctx']
|
| 53 |
max_tokens = item['max_tokens'] if 'max_tokens' in item.keys() else 512
|
| 54 |
user="""
|
| 55 |
{prompt}"""
|
| 56 |
-
|
| 57 |
-
llm = Llama(model_path="./code/"+model['file'], n_ctx=nctx, verbose=True, n_threads=8)
|
| 58 |
-
|
| 59 |
prompt = f"{prefix}{user.replace('{prompt}', item['prompt'])}{suffix}"
|
| 60 |
result = llm(prompt, max_tokens=max_tokens)
|
| 61 |
-
llm = None
|
| 62 |
return result
|
|
|
|
| 37 |
"suffix": "<|endoftext|><|assistant|>"
|
| 38 |
}
|
| 39 |
}
|
# --- Startup: load a default model so /llm_on_cpu works before any /change_llm call ---
# BUGFIX: the original used `llms.keys()[0]`, which raises TypeError in Python 3
# (dict_keys is not subscriptable). `next(iter(llms))` gets the first key instead.
model = llms[next(iter(llms))]
# BUGFIX: `nctx` was referenced below but never defined at module level (NameError
# at import time). Default to the model's own configured context size.
nctx = model['nctx']
# NOTE(review): n_threads=8 is hard-coded — presumably sized for the CPU Space; verify.
llm = Llama(model_path="./code/"+model['file'], n_ctx=nctx, verbose=True, n_threads=8)
|
| 43 |
|
| 44 |
|
| 45 |
#Fast API
|
| 46 |
app = FastAPI()
|
|
|
|
| 47 |
|
|
|
|
|
|
|
| 48 |
|
@app.post("/change_llm")
async def change(item: dict):
    """Swap the active model used by /llm_on_cpu.

    Expects `item['llm']` to be a key of the module-level `llms` registry;
    optional `item['nctx']` overrides the model's default context size.
    """
    # BUGFIX: without `global`, the assignments below create function locals and
    # the endpoint silently does nothing — `stream` keeps using the old model.
    global model, llm
    model = llms[item['llm']]
    # Idiom: dict.get with a default replaces the `'nctx' in item.keys()` check.
    nctx = item.get('nctx', model['nctx'])
    llm = Llama(model_path="./code/"+model['file'], n_ctx=nctx, verbose=True, n_threads=8)
| 54 |
+
|
@app.post("/llm_on_cpu")
async def stream(item: dict):
    """Run one completion on the currently loaded model.

    Expects `item['prompt']` (required) and optional `item['max_tokens']`
    (default 512). Uses the module-level `model` (for its prompt prefix/suffix)
    and `llm` (the loaded Llama instance) set at startup or by /change_llm.
    Returns the raw llama-cpp completion result.
    """
    prefix = model['prefix']
    suffix = model['suffix']
    # Idiom: dict.get with a default replaces the `'max_tokens' in item.keys()` check.
    max_tokens = item.get('max_tokens', 512)
    # Template reproduced byte-for-byte: a leading newline before the user prompt.
    user = """
{prompt}"""
    # Wrap the user prompt in the model's chat-format prefix/suffix.
    prompt = f"{prefix}{user.replace('{prompt}', item['prompt'])}{suffix}"
    result = llm(prompt, max_tokens=max_tokens)
    return result