pub struct LightningModelsInpaintingInput {Show 19 fields
pub embeddings: Option<Vec<Option<Embedding>>>,
pub enable_safety_checker: Option<bool>,
pub expand_prompt: Option<bool>,
pub format: Option<String>,
pub guidance_scale: Option<f64>,
pub image_size: Option<ImageSizeProperty>,
pub image_url: String,
pub loras: Option<Vec<Option<LoraWeight>>>,
pub mask_url: String,
pub model_name: Option<String>,
pub negative_prompt: Option<String>,
pub num_images: Option<i64>,
pub num_inference_steps: Option<i64>,
pub prompt: String,
pub safety_checker_version: Option<String>,
pub scheduler: Option<String>,
pub seed: Option<i64>,
pub strength: Option<f64>,
pub sync_mode: Option<bool>,
}Fields§
§embeddings: Option<Vec<Option<Embedding>>>The list of embeddings to use.
enable_safety_checker: Option<bool>If set to true, the safety checker will be enabled.
expand_prompt: Option<bool>If set to true, the prompt will be expanded with additional prompts.
format: Option<String>The format of the generated image.
guidance_scale: Option<f64>The CFG (Classifier Free Guidance) scale is a measure of how close you want the model to stick to your prompt when looking for a related image to show you.
image_size: Option<ImageSizeProperty>
image_url: String — The URL of the image to use as a starting point for the generation. Example: "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
loras: Option<Vec<Option<LoraWeight>>>The list of LoRA weights to use.
mask_url: String — The URL of the mask to use for inpainting. Example: "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
model_name: Option<String> — The Lightning model to use. Examples: "Lykon/dreamshaper-xl-lightning", "SG161222/RealVisXL_V4.0_Lightning"
negative_prompt: Option<String> — The negative prompt to use. Use it to address details that you don't want in the image. This could be colors, objects, scenery and even small details (e.g. moustache, blurry, low resolution). Example: "cartoon, illustration, animation. face. male, female"
num_images: Option<i64>The number of images to generate.
num_inference_steps: Option<i64>The number of inference steps to perform.
prompt: String — The prompt to use for generating the image. Be as descriptive as possible for best results. Example: "a tiger sitting on a park bench"
safety_checker_version: Option<String>The version of the safety checker to use. v1 is the default CompVis safety checker. v2 uses a custom ViT model.
scheduler: Option<String>Scheduler / sampler to use for the image denoising process.
seed: Option<i64>The same seed and the same prompt given to the same version of Stable Diffusion will output the same image every time.
strength: Option<f64>determines how much the generated image resembles the initial image
sync_mode: Option<bool>If set to true, the function will wait for the image to be generated and uploaded before returning the response. This will increase the latency of the function but it allows you to get the image directly in the response without going through the CDN.
Trait Implementations§
Source§impl Default for LightningModelsInpaintingInput
impl Default for LightningModelsInpaintingInput
Source§fn default() -> LightningModelsInpaintingInput
fn default() -> LightningModelsInpaintingInput
Source§impl<'de> Deserialize<'de> for LightningModelsInpaintingInput
impl<'de> Deserialize<'de> for LightningModelsInpaintingInput
Source§fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>where
__D: Deserializer<'de>,
Auto Trait Implementations§
impl Freeze for LightningModelsInpaintingInput
impl RefUnwindSafe for LightningModelsInpaintingInput
impl Send for LightningModelsInpaintingInput
impl Sync for LightningModelsInpaintingInput
impl Unpin for LightningModelsInpaintingInput
impl UnwindSafe for LightningModelsInpaintingInput
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<R, P> ReadPrimitive<R> for P
impl<R, P> ReadPrimitive<R> for P
Source§fn read_from_little_endian(read: &mut R) -> Result<Self, Error>
fn read_from_little_endian(read: &mut R) -> Result<Self, Error>
Read this value from the supplied reader. Same as ReadEndian::read_from_little_endian().