{ "KSampler": { "input": { "required": { "model": [ "MODEL", { "tooltip": "The model used for denoising the input latent." } ], "seed": [ "INT", { "default": 0, "min": 0, "max": 18446744073709551615, "control_after_generate": true, "tooltip": "The random seed used for creating the noise." } ], "steps": [ "INT", { "default": 20, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process." } ], "cfg": [ "FLOAT", { "default": 8.0, "min": 0.0, "max": 100.0, "step": 0.1, "round": 0.01, "tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality." } ], "sampler_name": [ [ "euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm", "ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp", "gradient_estimation", "er_sde", "ddim", "uni_pc", "uni_pc_bh2" ], { "tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output." } ], "scheduler": [ [ "normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "beta", "linear_quadratic", "kl_optimal" ], { "tooltip": "The scheduler controls how noise is gradually removed to form the image." } ], "positive": [ "CONDITIONING", { "tooltip": "The conditioning describing the attributes you want to include in the image." } ], "negative": [ "CONDITIONING", { "tooltip": "The conditioning describing the attributes you want to exclude from the image." } ], "latent_image": [ "LATENT", { "tooltip": "The latent image to denoise." } ], "denoise": [ "FLOAT", { "default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling." } ] } }, "input_order": { "required": [ "model", "seed", "steps", "cfg", "sampler_name", "scheduler", "positive", "negative", "latent_image", "denoise" ] }, "output": [ "LATENT" ], "output_is_list": [ false ], "output_name": [ "LATENT" ], "name": "KSampler", "display_name": "KSampler", "description": "Uses the provided model, positive and negative conditioning to denoise the latent image.", "python_module": "nodes", "category": "sampling", "output_node": false, "output_tooltips": [ "The denoised latent." 
] }, "CheckpointLoaderSimple": { "input": { "required": { "ckpt_name": [ [ "Anime/autismmixSDXL_autismmixConfetti.safetensors", "Anime/autismmixSDXL_autismmixPony.safetensors", "Anime/ponyDiffusionV6XL_v6StartWithThisOne.safetensors", "Anime/prefectPonyXL_v50.safetensors", "Anime/waiANINSFWPONYXL_v11.safetensors", "Anime/waiANINSFWPONYXL_v130.safetensors", "Anime/waiNSFWIllustrious_v70.safetensors", "Babes/babesBYSTABLEYOGI_xlV2.safetensors", "Babes/babesByStableYogi_ponyV3VAE.safetensors", "FLUX/flux1-dev-fp8.safetensors", "RDXL/rdxlAnime_sdxlPony8.safetensors", "RDXL/rdxlPixelArt_pony2.safetensors", "RDXL/realDream_sdxlPony12.safetensors", "Realism/cyberrealisticPony_v70a.safetensors", "Realism/cyberrealisticPony_v8.safetensors", "Realism/realvisxlV50_v50Bakedvae.safetensors", "SD3.5/sd3.5_large_fp16.safetensors", "SD3.5/sd3.5_large_fp8_scaled.safetensors", "Semi-realism/bemypony_Semirealanime.safetensors", "Semi-realism/duchaitenPonyXLNo_v60.safetensors", "prefectPonyXL_v3.safetensors", "sd-v1-5-inpainting.ckpt", "v1-5-pruned-emaonly.ckpt" ], { "tooltip": "The name of the checkpoint (model) to load." } ] } }, "input_order": { "required": [ "ckpt_name" ] }, "output": [ "MODEL", "CLIP", "VAE" ], "output_is_list": [ false, false, false ], "output_name": [ "MODEL", "CLIP", "VAE" ], "name": "CheckpointLoaderSimple", "display_name": "Load Checkpoint", "description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.", "python_module": "nodes", "category": "loaders", "output_node": false, "output_tooltips": [ "The model used for denoising latents.", "The CLIP model used for encoding text prompts.", "The VAE model used for encoding and decoding images to and from latent space." ] }, "CLIPTextEncode": { "input": { "required": { "text": [ "STRING", { "multiline": true, "dynamicPrompts": true, "tooltip": "The text to be encoded." } ], "clip": [ "CLIP", { "tooltip": "The CLIP model used for encoding the text." } ] } }, "input_order": { "required": [ "text", "clip" ] }, "output": [ "CONDITIONING" ], "output_is_list": [ false ], "output_name": [ "CONDITIONING" ], "name": "CLIPTextEncode", "display_name": "CLIP Text Encode (Prompt)", "description": "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.", "python_module": "nodes", "category": "conditioning", "output_node": false, "output_tooltips": [ "A conditioning containing the embedded text used to guide the diffusion model." ] }, "CLIPSetLastLayer": { "input": { "required": { "clip": [ "CLIP" ], "stop_at_clip_layer": [ "INT", { "default": -1, "min": -24, "max": -1, "step": 1 } ] } }, "input_order": { "required": [ "clip", "stop_at_clip_layer" ] }, "output": [ "CLIP" ], "output_is_list": [ false ], "output_name": [ "CLIP" ], "name": "CLIPSetLastLayer", "display_name": "CLIP Set Last Layer", "description": "", "python_module": "nodes", "category": "conditioning", "output_node": false }, "VAEDecode": { "input": { "required": { "samples": [ "LATENT", { "tooltip": "The latent to be decoded." } ], "vae": [ "VAE", { "tooltip": "The VAE model used for decoding the latent." 
  "VAEEncode": {
    "input": {
      "required": {
        "pixels": ["IMAGE"],
        "vae": ["VAE"]
      }
    },
    "input_order": { "required": ["pixels", "vae"] },
    "output": ["LATENT"],
    "output_is_list": [false],
    "output_name": ["LATENT"],
    "name": "VAEEncode",
    "display_name": "VAE Encode",
    "description": "",
    "python_module": "nodes",
    "category": "latent",
    "output_node": false
  },
  "VAELoader": {
    "input": {
      "required": {
        "vae_name": [["ae.safetensors", "sdxl_vae.safetensors", "vae-ft-mse-840000-ema-pruned.ckpt"]]
      }
    },
    "input_order": { "required": ["vae_name"] },
    "output": ["VAE"],
    "output_is_list": [false],
    "output_name": ["VAE"],
    "name": "VAELoader",
    "display_name": "Load VAE",
    "description": "",
    "python_module": "nodes",
    "category": "loaders",
    "output_node": false
  },
  "EmptyLatentImage": {
    "input": {
      "required": {
        "width": ["INT", { "default": 512, "min": 16, "max": 16384, "step": 8, "tooltip": "The width of the latent images in pixels." }],
        "height": ["INT", { "default": 512, "min": 16, "max": 16384, "step": 8, "tooltip": "The height of the latent images in pixels." }],
        "batch_size": ["INT", { "default": 1, "min": 1, "max": 4096, "tooltip": "The number of latent images in the batch." }]
      }
    },
    "input_order": { "required": ["width", "height", "batch_size"] },
    "output": ["LATENT"],
    "output_is_list": [false],
    "output_name": ["LATENT"],
    "name": "EmptyLatentImage",
    "display_name": "Empty Latent Image",
    "description": "Creates a new batch of empty latent images to be denoised via sampling.",
    "python_module": "nodes",
    "category": "latent",
    "output_node": false,
    "output_tooltips": ["The empty latent image batch."]
  },
  "LatentUpscale": {
    "input": {
      "required": {
        "samples": ["LATENT"],
        "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]],
        "width": ["INT", { "default": 512, "min": 0, "max": 16384, "step": 8 }],
        "height": ["INT", { "default": 512, "min": 0, "max": 16384, "step": 8 }],
        "crop": [["disabled", "center"]]
      }
    },
    "input_order": { "required": ["samples", "upscale_method", "width", "height", "crop"] },
    "output": ["LATENT"],
    "output_is_list": [false],
    "output_name": ["LATENT"],
    "name": "LatentUpscale",
    "display_name": "Upscale Latent",
    "description": "",
    "python_module": "nodes",
    "category": "latent",
    "output_node": false
  },
  "LatentUpscaleBy": {
    "input": {
      "required": {
        "samples": ["LATENT"],
        "upscale_method": [["nearest-exact", "bilinear", "area", "bicubic", "bislerp"]],
        "scale_by": ["FLOAT", { "default": 1.5, "min": 0.01, "max": 8.0, "step": 0.01 }]
      }
    },
    "input_order": { "required": ["samples", "upscale_method", "scale_by"] },
    "output": ["LATENT"],
    "output_is_list": [false],
    "output_name": ["LATENT"],
    "name": "LatentUpscaleBy",
    "display_name": "Upscale Latent By",
    "description": "",
    "python_module": "nodes",
    "category": "latent",
    "output_node": false
  }
}
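
The object above is an excerpt of what ComfyUI serves from its GET /object_info endpoint (GET /object_info/<class> returns a single node). Each required input is a [type, options] pair, and enum inputs use a list of choices in place of a type string, so the schema is machine-checkable. The sketch below, assuming a default local server at http://127.0.0.1:8188, fetches one node's definition and checks a proposed value against it; fetch_schema and validate_input are hypothetical helper names for illustration, not part of ComfyUI.

    import json
    import urllib.request

    COMFY_URL = "http://127.0.0.1:8188"  # assumed default local ComfyUI server

    def fetch_schema(node_class: str) -> dict:
        # GET /object_info/<class> returns {"<class>": <definition>}.
        with urllib.request.urlopen(f"{COMFY_URL}/object_info/{node_class}") as resp:
            return json.load(resp)[node_class]

    def validate_input(schema: dict, name: str, value):
        # Each required input is a [type, options] pair; the options object
        # may be absent, and enum inputs carry a list of choices as the type.
        spec = schema["input"]["required"][name]
        type_or_choices = spec[0]
        options = spec[1] if len(spec) > 1 else {}
        if isinstance(type_or_choices, list):        # enum, e.g. sampler_name
            if value not in type_or_choices:
                raise ValueError(f"{name}: {value!r} is not a valid choice")
        elif type_or_choices in ("INT", "FLOAT"):    # numeric range check
            lo, hi = options.get("min"), options.get("max")
            if (lo is not None and value < lo) or (hi is not None and value > hi):
                raise ValueError(f"{name}: {value} outside [{lo}, {hi}]")
        return value

    ksampler = fetch_schema("KSampler")
    validate_input(ksampler, "steps", 20)              # ok: within 1..10000
    validate_input(ksampler, "sampler_name", "euler")  # ok: in the enum list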
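To run these nodes, a client wires them into a graph in the API's "prompt" format and POSTs it to /prompt: keys are arbitrary node ids, and each input is either a literal value or a ["<node_id>", <output_index>] link, where the index follows the node's "output" order above (for Load Checkpoint: MODEL=0, CLIP=1, VAE=2). Below is a minimal text-to-image sketch against the same assumed local server; the node ids, seed, and prompt strings are arbitrary, and SaveImage is a standard output node that does not appear in the excerpt above.

    import json
    import urllib.request

    COMFY_URL = "http://127.0.0.1:8188"  # assumed default local ComfyUI server

    # Minimal txt2img graph: checkpoint -> prompts -> empty latent -> sampler -> decode -> save.
    workflow = {
        "1": {"class_type": "CheckpointLoaderSimple",
              "inputs": {"ckpt_name": "v1-5-pruned-emaonly.ckpt"}},
        "2": {"class_type": "CLIPTextEncode",
              "inputs": {"text": "a watercolor fox", "clip": ["1", 1]}},
        "3": {"class_type": "CLIPTextEncode",
              "inputs": {"text": "blurry, low quality", "clip": ["1", 1]}},
        "4": {"class_type": "EmptyLatentImage",
              "inputs": {"width": 512, "height": 512, "batch_size": 1}},
        "5": {"class_type": "KSampler",
              "inputs": {"model": ["1", 0], "seed": 42, "steps": 20, "cfg": 8.0,
                         "sampler_name": "euler", "scheduler": "normal",
                         "positive": ["2", 0], "negative": ["3", 0],
                         "latent_image": ["4", 0], "denoise": 1.0}},
        "6": {"class_type": "VAEDecode",
              "inputs": {"samples": ["5", 0], "vae": ["1", 2]}},
        "7": {"class_type": "SaveImage",  # output node, not shown in the excerpt above
              "inputs": {"images": ["6", 0], "filename_prefix": "example"}},
    }

    # POST /prompt queues the graph; the response includes a prompt_id.
    req = urllib.request.Request(
        f"{COMFY_URL}/prompt",
        data=json.dumps({"prompt": workflow}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp))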
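The denoise input is what enables image-to-image refinement: values below 1.0 keep part of the input latent's structure, as its tooltip notes. Combined with LatentUpscaleBy, this gives the common two-pass "hires fix" pattern, sketched below as an extension of the workflow dict from the previous example; the 1.5 scale and 0.5 denoise are illustrative values, not recommendations.

    # Hires-fix-style second pass: upscale the sampled latent, then re-sample
    # it with denoise < 1.0 so the first pass's composition is preserved.
    workflow.update({
        "8": {"class_type": "LatentUpscaleBy",
              "inputs": {"samples": ["5", 0], "upscale_method": "bislerp",
                         "scale_by": 1.5}},
        "9": {"class_type": "KSampler",
              "inputs": {"model": ["1", 0], "seed": 42, "steps": 14, "cfg": 8.0,
                         "sampler_name": "euler", "scheduler": "normal",
                         "positive": ["2", 0], "negative": ["3", 0],
                         "latent_image": ["8", 0],
                         "denoise": 0.5}},  # typical refinement strength, not a rule
    })
    # Re-point the decoder at the refined latent before queueing.
    workflow["6"]["inputs"]["samples"] = ["9", 0]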