Mirror of https://github.com/mudler/LocalAI.git (synced 2025-05-20)
feat(llama.cpp): estimate vram usage (#5299)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
parent bace6516f1 · commit 5c6cd50ed6
7 changed files with 131 additions and 21 deletions
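The new TotalAvailableVRAM helper (added in the hunk below) gives the rest of the stack a number to budget against when deciding how much of a model to offload to the GPU. A hypothetical fit check, not code from this commit, illustrating how such an estimate could be consumed (the function name and the headroom margin are assumptions):

// Hypothetical illustration only: gate full GPU offload on the VRAM
// estimate. fitsInVRAM is not part of this commit; it calls the
// TotalAvailableVRAM helper added in the diff below.
func fitsInVRAM(modelBytes uint64) bool {
	vram, err := TotalAvailableVRAM()
	if err != nil || vram == 0 {
		return false // no usable estimate: assume the model does not fit
	}
	// Keep ~10% headroom for KV cache and driver overhead (an assumed margin).
	return modelBytes <= vram*9/10
}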
@@ -16,6 +16,22 @@ func GPUs() ([]*gpu.GraphicsCard, error) {
 	return gpu.GraphicsCards, nil
 }
 
+func TotalAvailableVRAM() (uint64, error) {
+	gpus, err := GPUs()
+	if err != nil {
+		return 0, err
+	}
+
+	var totalVRAM uint64
+	for _, gpu := range gpus {
+		if gpu.Node.Memory.TotalUsableBytes > 0 {
+			totalVRAM += uint64(gpu.Node.Memory.TotalUsableBytes)
+		}
+	}
+
+	return totalVRAM, nil
+}
+
 func HasGPU(vendor string) bool {
 	gpus, err := GPUs()
 	if err != nil {
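The gpu.GraphicsCard and Node.Memory.TotalUsableBytes fields suggest GPUs() is backed by the jaypipes/ghw hardware-probing library; that is an inference, since the imports sit outside this hunk. A minimal self-contained sketch under that assumption, with nil checks added that the diff omits (ghw can leave Node or Memory unset when sysfs data is unavailable):

// vramprobe.go — a sketch, assuming the helpers in the diff are backed by
// github.com/jaypipes/ghw as the field names suggest.
package main

import (
	"fmt"

	"github.com/jaypipes/ghw"
	"github.com/jaypipes/ghw/pkg/gpu"
)

// GPUs enumerates graphics cards, mirroring the context line in the hunk.
func GPUs() ([]*gpu.GraphicsCard, error) {
	info, err := ghw.GPU()
	if err != nil {
		return nil, err
	}
	return info.GraphicsCards, nil
}

// TotalAvailableVRAM sums TotalUsableBytes across each card's node, as in
// the added function; the nil checks are defensive additions made here.
func TotalAvailableVRAM() (uint64, error) {
	gpus, err := GPUs()
	if err != nil {
		return 0, err
	}

	var totalVRAM uint64
	for _, card := range gpus {
		if card.Node != nil && card.Node.Memory != nil && card.Node.Memory.TotalUsableBytes > 0 {
			totalVRAM += uint64(card.Node.Memory.TotalUsableBytes)
		}
	}
	return totalVRAM, nil
}

func main() {
	vram, err := TotalAvailableVRAM()
	if err != nil {
		fmt.Println("VRAM probe failed:", err)
		return
	}
	fmt.Printf("estimated usable VRAM: %.2f GiB\n", float64(vram)/float64(1<<30))
}

Note that ghw's Node appears to describe the topology node a card is attached to rather than a driver-level VRAM query, so this is a coarse figure, consistent with the "estimate" wording in the commit title.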