fix: cut prompt from AutoGPTQ answers
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
parent bb7772a364
commit 219751bb21

1 changed file with 6 additions and 1 deletion
@@ -61,7 +61,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             top_p=top_p,
             repetition_penalty=penalty,
         )
-        return backend_pb2.Result(message=bytes(pipeline(request.Prompt)[0]["generated_text"], encoding='utf-8'))
+        t = pipeline(request.Prompt)[0]["generated_text"]
+        # Remove prompt from response if present
+        if request.Prompt in t:
+            t = t.replace(request.Prompt, "")
+
+        return backend_pb2.Result(message=bytes(t, encoding='utf-8'))
 
     def PredictStream(self, request, context):
         # Implement PredictStream RPC
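For context, here is a minimal standalone sketch of the behavior this patch introduces in the Predict path. It assumes a transformers-style text-generation pipeline that echoes the prompt at the start of "generated_text"; the strip_prompt helper and the sample strings below are illustrative only and are not part of the actual backend file.

    # Sketch of the prompt-stripping logic added in this commit (not the real backend).
    def strip_prompt(prompt: str, generated_text: str) -> str:
        # Text-generation pipelines typically echo the prompt at the start of
        # the output; remove it so only the model's answer is returned.
        if prompt in generated_text:
            return generated_text.replace(prompt, "")
        return generated_text

    if __name__ == "__main__":
        prompt = "What is LocalAI?"
        generated = "What is LocalAI? LocalAI is a self-hosted OpenAI-compatible API."
        print(strip_prompt(prompt, generated))
        # -> " LocalAI is a self-hosted OpenAI-compatible API."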