mirror of
https://github.com/mudler/LocalAI.git
synced 2025-05-21 19:15:00 +00:00
feat: 🐍 add mamba support (#1589)
feat(mamba): Initial import. This is a first iteration of the mamba backend, loosely based on mamba-chat (https://github.com/havenhq/mamba-chat).
This commit is contained in:
parent
52c9a7f45d
commit
9e653d6abe
13 changed files with 762 additions and 3 deletions
|
@ -33,7 +33,13 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
|||
def LoadModel(self, request, context):
|
||||
|
||||
# Get device
|
||||
device = "cuda" if request.CUDA else "cpu"
|
||||
# device = "cuda" if request.CUDA else "cpu"
|
||||
if torch.cuda.is_available():
|
||||
print("CUDA is available", file=sys.stderr)
|
||||
device = "cuda"
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue