Mirror of https://github.com/mudler/LocalAI.git, synced 2025-06-30 06:30:43 +00:00
Move cfg scale out of diffusers block
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
commit 606f21520b (parent 354928b914)
4 changed files with 18 additions and 14 deletions
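For orientation, here is a minimal, self-contained sketch of what the change means for model configuration: `cfg_scale` is now parsed at the top level of the YAML config (field `CFGScale` on `LLMConfig`) rather than inside the `diffusers:` block. The struct and field names mirror the hunks below; everything else (the trimmed-down structs, `package main`, and the use of `gopkg.in/yaml.v3`) is an illustrative assumption, not LocalAI source.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Diffusers no longer carries CFGScale after this commit (sketch, trimmed).
type Diffusers struct {
	CUDA          bool   `yaml:"cuda"`
	PipelineType  string `yaml:"pipeline_type"`
	SchedulerType string `yaml:"scheduler_type"`
}

// LLMConfig now owns CFGScale, so cfg_scale becomes a top-level key (sketch, trimmed).
type LLMConfig struct {
	Diffusers Diffusers `yaml:"diffusers"`
	CFGScale  float32   `yaml:"cfg_scale"` // Classifier-Free Guidance Scale
}

func main() {
	doc := []byte(`
diffusers:
  pipeline_type: StableDiffusionPipeline
  scheduler_type: k_dpmpp_sde
cfg_scale: 8
`)
	var c LLMConfig
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	// The gRPC model options now take the value from the config itself,
	// roughly CFGScale: c.CFGScale instead of c.Diffusers.CFGScale.
	fmt.Println(c.CFGScale) // prints 8
}
```

Under this sketch, a config that still sets `cfg_scale` inside the `diffusers:` block would leave `CFGScale` at its zero value, since the nested key no longer maps to a known field.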
@@ -122,7 +122,7 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions {
 		CUDA:          c.CUDA || c.Diffusers.CUDA,
 		SchedulerType: c.Diffusers.SchedulerType,
 		PipelineType:  c.Diffusers.PipelineType,
-		CFGScale:      c.Diffusers.CFGScale,
+		CFGScale:      c.CFGScale,
 		LoraAdapter:   c.LoraAdapter,
 		LoraScale:     c.LoraScale,
 		LoraAdapters:  c.LoraAdapters,

@@ -99,16 +99,15 @@ type GRPC struct {
 }
 
 type Diffusers struct {
 	CUDA             bool    `yaml:"cuda"`
 	PipelineType     string  `yaml:"pipeline_type"`
 	SchedulerType    string  `yaml:"scheduler_type"`
 	EnableParameters string  `yaml:"enable_parameters"` // A list of comma separated parameters to specify
-	CFGScale         float32 `yaml:"cfg_scale"`          // Classifier-Free Guidance Scale
 	IMG2IMG          bool    `yaml:"img2img"`            // Image to Image Diffuser
 	ClipSkip         int     `yaml:"clip_skip"`          // Skip every N frames
 	ClipModel        string  `yaml:"clip_model"`         // Clip model to use
 	ClipSubFolder    string  `yaml:"clip_subfolder"`     // Subfolder to use for clip model
 	ControlNet       string  `yaml:"control_net"`
 }
 
 // LLMConfig is a struct that holds the configuration that are
@@ -166,6 +165,8 @@ type LLMConfig struct {
 	YarnAttnFactor float32 `yaml:"yarn_attn_factor"`
 	YarnBetaFast   float32 `yaml:"yarn_beta_fast"`
 	YarnBetaSlow   float32 `yaml:"yarn_beta_slow"`
+
+	CFGScale float32 `yaml:"cfg_scale"` // Classifier-Free Guidance Scale
 }
 
 // AutoGPTQ is a struct that holds the configuration specific to the AutoGPTQ backend

@@ -194,8 +194,9 @@ diffusers:
   pipeline_type: StableDiffusionPipeline
   enable_parameters: "negative_prompt,num_inference_steps,clip_skip"
   scheduler_type: "k_dpmpp_sde"
-  cfg_scale: 8
   clip_skip: 11
+
+cfg_scale: 8
 ```
 
 #### Configuration parameters

@@ -302,7 +303,8 @@ cuda: true
 diffusers:
   pipeline_type: StableDiffusionDepth2ImgPipeline
   enable_parameters: "negative_prompt,num_inference_steps,image"
-  cfg_scale: 6
+
+cfg_scale: 6
 ```
 
 ```bash

@@ -11,4 +11,5 @@ config_file: |
     cuda: true
     enable_parameters: num_inference_steps
     pipeline_type: FluxPipeline
-    cfg_scale: 0
+
+  cfg_scale: 0