feat: first pass at improving logging (#1956)

Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com>
Author: cryptk
Date:   2024-04-04 02:24:22 -05:00 (committed via GitHub)
parent 3851b51d98
commit b85dad0286
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
13 changed files with 53 additions and 43 deletions


@@ -84,7 +84,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	result, err := handleQuestion(config, req, ml, startupOptions, results[0].arguments, prompt)
 	if err != nil {
-		log.Error().Msgf("error handling question: %s", err.Error())
+		log.Error().Err(err).Msg("error handling question")
 		return
 	}
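
The hunk above is the pattern repeated throughout this commit: the error is moved out of the format string and into zerolog's dedicated error field. A minimal standalone sketch of the before/after (illustrative only, not code from this repository):

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("connection refused")

	// Before: the error is interpolated into the message string, so JSON
	// consumers see one opaque string and cannot filter on the error alone.
	log.Error().Msgf("error handling question: %s", err.Error())

	// After: the error travels in zerolog's dedicated "error" field and the
	// message stays a constant, which makes filtering and alerting reliable.
	log.Error().Err(err).Msg("error handling question")
}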
@@ -268,7 +268,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	}
 	templatedChatMessage, err := ml.EvaluateTemplateForChatMessage(config.TemplateConfig.ChatMessage, chatMessageData)
 	if err != nil {
-		log.Error().Msgf("error processing message %+v using template \"%s\": %v. Skipping!", chatMessageData, config.TemplateConfig.ChatMessage, err)
+		log.Error().Err(err).Interface("message", chatMessageData).Str("template", config.TemplateConfig.ChatMessage).Msg("error processing message with template, skipping")
 	} else {
 		if templatedChatMessage == "" {
 			log.Warn().Msgf("template \"%s\" produced blank output for %+v. Skipping!", config.TemplateConfig.ChatMessage, chatMessageData)
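
This hunk goes further than a message swap: the template name and the message payload become typed fields via Str and Interface. A hedged sketch of what zerolog's default JSON writer emits for that chain (stand-in values, not repository data):

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("template: chat: unexpected EOF")
	chatMessageData := map[string]string{"role": "user"} // stand-in payload

	log.Error().
		Err(err).
		Interface("message", chatMessageData).
		Str("template", "chat.tmpl"). // stand-in template name
		Msg("error processing message with template, skipping")

	// Output is roughly:
	// {"level":"error","error":"template: chat: unexpected EOF",
	//  "message":{"role":"user"},"template":"chat.tmpl","time":...,
	//  "message":"error processing message with template, skipping"}
	// Note that zerolog does not deduplicate keys, so the "message" field
	// chosen here collides with the default message key in the final JSON.
}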
@@ -455,7 +455,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
 	case noActionsToRun:
 		result, err := handleQuestion(config, input, ml, startupOptions, results[0].arguments, predInput)
 		if err != nil {
-			log.Error().Msgf("error handling question: %s", err.Error())
+			log.Error().Err(err).Msg("error handling question")
			return
 		}
 		*c = append(*c, schema.Choice{
@@ -565,13 +565,13 @@ func handleQuestion(config *config.BackendConfig, input *schema.OpenAIRequest, m
 	predFunc, err := backend.ModelInference(input.Context, prompt, images, ml, *config, o, nil)
 	if err != nil {
-		log.Error().Msgf("inference error: %s", err.Error())
+		log.Error().Err(err).Msg("model inference failed")
 		return "", err
 	}
 	prediction, err := predFunc()
 	if err != nil {
-		log.Error().Msgf("inference error: %s", err.Error())
+		log.Error().Err(err).Msg("prediction failed")
 		return "", err
 	}
 	return backend.Finetune(*config, prompt, prediction.Response), nil


@@ -3,7 +3,6 @@ package openai
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/rs/zerolog/log"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -12,6 +11,8 @@ import (
 	"path/filepath"
 	"strings"
+	"github.com/rs/zerolog/log"
+
 	"github.com/go-skynet/LocalAI/core/config"
 	utils2 "github.com/go-skynet/LocalAI/pkg/utils"
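
The two import hunks above only relocate the zerolog import out of the standard-library group. For reference, this is the conventional Go grouping the file appears to converge on (standard library, then third-party, then module-local), which gofmt preserves and goimports produces; shown as a fragment, with any imports not visible in the diff omitted:

// Fragment only; the enclosing file's package clause and other imports
// elided from the diff are omitted here.
import (
	"encoding/json"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"path/filepath"
	"strings"

	"github.com/rs/zerolog/log"

	"github.com/go-skynet/LocalAI/core/config"
	utils2 "github.com/go-skynet/LocalAI/pkg/utils"
)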
@@ -297,7 +298,7 @@ func responseToListFile(t *testing.T, resp *http.Response) ListFiles {
 	err := json.NewDecoder(strings.NewReader(responseToString)).Decode(&listFiles)
 	if err != nil {
-		log.Error().Msgf("Failed to decode response: %s", err)
+		log.Error().Err(err).Msg("failed to decode response")
 	}
 	return listFiles
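
Since this helper receives a *testing.T, an alternative design would be to fail the test at the decode site rather than logging and returning a zero-value ListFiles. A sketch of that variant (an alternative, not what this commit does; it also decodes resp.Body directly instead of the pre-read responseToString string, and ListFiles is the type defined elsewhere in this file):

// Variant sketch: fail fast instead of logging and continuing.
func responseToListFile(t *testing.T, resp *http.Response) ListFiles {
	t.Helper()
	var listFiles ListFiles
	if err := json.NewDecoder(resp.Body).Decode(&listFiles); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}
	return listFiles
}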