Add logger dependency and implement PromptBuilder for workflow management

Author: Menno van Leeuwen, 2025-03-21 11:51:35 +01:00
parent f45572532f
commit 8dad2b57b3
Signed by: vleeuwenmenno
SSH Key Fingerprint: SHA256:OJFmjANpakwD3F2Rsws4GLtbdz1TJ5tkQF0RZmF0TRE
6 changed files with 699 additions and 2 deletions

example/object_info.json (new file, 595 lines)

@@ -0,0 +1,595 @@
{
"KSampler": {
"input": {
"required": {
"model": [
"MODEL",
{
"tooltip": "The model used for denoising the input latent."
}
],
"seed": [
"INT",
{
"default": 0,
"min": 0,
"max": 18446744073709551615,
"control_after_generate": true,
"tooltip": "The random seed used for creating the noise."
}
],
"steps": [
"INT",
{
"default": 20,
"min": 1,
"max": 10000,
"tooltip": "The number of steps used in the denoising process."
}
],
"cfg": [
"FLOAT",
{
"default": 8.0,
"min": 0.0,
"max": 100.0,
"step": 0.1,
"round": 0.01,
"tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."
}
],
"sampler_name": [
[
"euler",
"euler_cfg_pp",
"euler_ancestral",
"euler_ancestral_cfg_pp",
"heun",
"heunpp2",
"dpm_2",
"dpm_2_ancestral",
"lms",
"dpm_fast",
"dpm_adaptive",
"dpmpp_2s_ancestral",
"dpmpp_2s_ancestral_cfg_pp",
"dpmpp_sde",
"dpmpp_sde_gpu",
"dpmpp_2m",
"dpmpp_2m_cfg_pp",
"dpmpp_2m_sde",
"dpmpp_2m_sde_gpu",
"dpmpp_3m_sde",
"dpmpp_3m_sde_gpu",
"ddpm",
"lcm",
"ipndm",
"ipndm_v",
"deis",
"res_multistep",
"res_multistep_cfg_pp",
"res_multistep_ancestral",
"res_multistep_ancestral_cfg_pp",
"gradient_estimation",
"er_sde",
"ddim",
"uni_pc",
"uni_pc_bh2"
],
{
"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."
}
],
"scheduler": [
[
"normal",
"karras",
"exponential",
"sgm_uniform",
"simple",
"ddim_uniform",
"beta",
"linear_quadratic",
"kl_optimal"
],
{
"tooltip": "The scheduler controls how noise is gradually removed to form the image."
}
],
"positive": [
"CONDITIONING",
{
"tooltip": "The conditioning describing the attributes you want to include in the image."
}
],
"negative": [
"CONDITIONING",
{
"tooltip": "The conditioning describing the attributes you want to exclude from the image."
}
],
"latent_image": [
"LATENT",
{
"tooltip": "The latent image to denoise."
}
],
"denoise": [
"FLOAT",
{
"default": 1.0,
"min": 0.0,
"max": 1.0,
"step": 0.01,
"tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."
}
]
}
},
"input_order": {
"required": [
"model",
"seed",
"steps",
"cfg",
"sampler_name",
"scheduler",
"positive",
"negative",
"latent_image",
"denoise"
]
},
"output": [
"LATENT"
],
"output_is_list": [
false
],
"output_name": [
"LATENT"
],
"name": "KSampler",
"display_name": "KSampler",
"description": "Uses the provided model, positive and negative conditioning to denoise the latent image.",
"python_module": "nodes",
"category": "sampling",
"output_node": false,
"output_tooltips": [
"The denoised latent."
]
},
"CheckpointLoaderSimple": {
"input": {
"required": {
"ckpt_name": [
[
"Anime/autismmixSDXL_autismmixConfetti.safetensors",
"Anime/autismmixSDXL_autismmixPony.safetensors",
"Anime/ponyDiffusionV6XL_v6StartWithThisOne.safetensors",
"Anime/prefectPonyXL_v50.safetensors",
"Anime/waiANINSFWPONYXL_v11.safetensors",
"Anime/waiANINSFWPONYXL_v130.safetensors",
"Anime/waiNSFWIllustrious_v70.safetensors",
"Babes/babesBYSTABLEYOGI_xlV2.safetensors",
"Babes/babesByStableYogi_ponyV3VAE.safetensors",
"FLUX/flux1-dev-fp8.safetensors",
"RDXL/rdxlAnime_sdxlPony8.safetensors",
"RDXL/rdxlPixelArt_pony2.safetensors",
"RDXL/realDream_sdxlPony12.safetensors",
"Realism/cyberrealisticPony_v70a.safetensors",
"Realism/cyberrealisticPony_v8.safetensors",
"Realism/realvisxlV50_v50Bakedvae.safetensors",
"SD3.5/sd3.5_large_fp16.safetensors",
"SD3.5/sd3.5_large_fp8_scaled.safetensors",
"Semi-realism/bemypony_Semirealanime.safetensors",
"Semi-realism/duchaitenPonyXLNo_v60.safetensors",
"prefectPonyXL_v3.safetensors",
"sd-v1-5-inpainting.ckpt",
"v1-5-pruned-emaonly.ckpt"
],
{
"tooltip": "The name of the checkpoint (model) to load."
}
]
}
},
"input_order": {
"required": [
"ckpt_name"
]
},
"output": [
"MODEL",
"CLIP",
"VAE"
],
"output_is_list": [
false,
false,
false
],
"output_name": [
"MODEL",
"CLIP",
"VAE"
],
"name": "CheckpointLoaderSimple",
"display_name": "Load Checkpoint",
"description": "Loads a diffusion model checkpoint, diffusion models are used to denoise latents.",
"python_module": "nodes",
"category": "loaders",
"output_node": false,
"output_tooltips": [
"The model used for denoising latents.",
"The CLIP model used for encoding text prompts.",
"The VAE model used for encoding and decoding images to and from latent space."
]
},
"CLIPTextEncode": {
"input": {
"required": {
"text": [
"STRING",
{
"multiline": true,
"dynamicPrompts": true,
"tooltip": "The text to be encoded."
}
],
"clip": [
"CLIP",
{
"tooltip": "The CLIP model used for encoding the text."
}
]
}
},
"input_order": {
"required": [
"text",
"clip"
]
},
"output": [
"CONDITIONING"
],
"output_is_list": [
false
],
"output_name": [
"CONDITIONING"
],
"name": "CLIPTextEncode",
"display_name": "CLIP Text Encode (Prompt)",
"description": "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images.",
"python_module": "nodes",
"category": "conditioning",
"output_node": false,
"output_tooltips": [
"A conditioning containing the embedded text used to guide the diffusion model."
]
},
"CLIPSetLastLayer": {
"input": {
"required": {
"clip": [
"CLIP"
],
"stop_at_clip_layer": [
"INT",
{
"default": -1,
"min": -24,
"max": -1,
"step": 1
}
]
}
},
"input_order": {
"required": [
"clip",
"stop_at_clip_layer"
]
},
"output": [
"CLIP"
],
"output_is_list": [
false
],
"output_name": [
"CLIP"
],
"name": "CLIPSetLastLayer",
"display_name": "CLIP Set Last Layer",
"description": "",
"python_module": "nodes",
"category": "conditioning",
"output_node": false
},
"VAEDecode": {
"input": {
"required": {
"samples": [
"LATENT",
{
"tooltip": "The latent to be decoded."
}
],
"vae": [
"VAE",
{
"tooltip": "The VAE model used for decoding the latent."
}
]
}
},
"input_order": {
"required": [
"samples",
"vae"
]
},
"output": [
"IMAGE"
],
"output_is_list": [
false
],
"output_name": [
"IMAGE"
],
"name": "VAEDecode",
"display_name": "VAE Decode",
"description": "Decodes latent images back into pixel space images.",
"python_module": "nodes",
"category": "latent",
"output_node": false,
"output_tooltips": [
"The decoded image."
]
},
"VAEEncode": {
"input": {
"required": {
"pixels": [
"IMAGE"
],
"vae": [
"VAE"
]
}
},
"input_order": {
"required": [
"pixels",
"vae"
]
},
"output": [
"LATENT"
],
"output_is_list": [
false
],
"output_name": [
"LATENT"
],
"name": "VAEEncode",
"display_name": "VAE Encode",
"description": "",
"python_module": "nodes",
"category": "latent",
"output_node": false
},
"VAELoader": {
"input": {
"required": {
"vae_name": [
[
"ae.safetensors",
"sdxl_vae.safetensors",
"vae-ft-mse-840000-ema-pruned.ckpt"
]
]
}
},
"input_order": {
"required": [
"vae_name"
]
},
"output": [
"VAE"
],
"output_is_list": [
false
],
"output_name": [
"VAE"
],
"name": "VAELoader",
"display_name": "Load VAE",
"description": "",
"python_module": "nodes",
"category": "loaders",
"output_node": false
},
"EmptyLatentImage": {
"input": {
"required": {
"width": [
"INT",
{
"default": 512,
"min": 16,
"max": 16384,
"step": 8,
"tooltip": "The width of the latent images in pixels."
}
],
"height": [
"INT",
{
"default": 512,
"min": 16,
"max": 16384,
"step": 8,
"tooltip": "The height of the latent images in pixels."
}
],
"batch_size": [
"INT",
{
"default": 1,
"min": 1,
"max": 4096,
"tooltip": "The number of latent images in the batch."
}
]
}
},
"input_order": {
"required": [
"width",
"height",
"batch_size"
]
},
"output": [
"LATENT"
],
"output_is_list": [
false
],
"output_name": [
"LATENT"
],
"name": "EmptyLatentImage",
"display_name": "Empty Latent Image",
"description": "Create a new batch of empty latent images to be denoised via sampling.",
"python_module": "nodes",
"category": "latent",
"output_node": false,
"output_tooltips": [
"The empty latent image batch."
]
},
"LatentUpscale": {
"input": {
"required": {
"samples": [
"LATENT"
],
"upscale_method": [
[
"nearest-exact",
"bilinear",
"area",
"bicubic",
"bislerp"
]
],
"width": [
"INT",
{
"default": 512,
"min": 0,
"max": 16384,
"step": 8
}
],
"height": [
"INT",
{
"default": 512,
"min": 0,
"max": 16384,
"step": 8
}
],
"crop": [
[
"disabled",
"center"
]
]
}
},
"input_order": {
"required": [
"samples",
"upscale_method",
"width",
"height",
"crop"
]
},
"output": [
"LATENT"
],
"output_is_list": [
false
],
"output_name": [
"LATENT"
],
"name": "LatentUpscale",
"display_name": "Upscale Latent",
"description": "",
"python_module": "nodes",
"category": "latent",
"output_node": false
},
"LatentUpscaleBy": {
"input": {
"required": {
"samples": [
"LATENT"
],
"upscale_method": [
[
"nearest-exact",
"bilinear",
"area",
"bicubic",
"bislerp"
]
],
"scale_by": [
"FLOAT",
{
"default": 1.5,
"min": 0.01,
"max": 8.0,
"step": 0.01
}
]
}
},
"input_order": {
"required": [
"samples",
"upscale_method",
"scale_by"
]
},
"output": [
"LATENT"
],
"output_is_list": [
false
],
"output_name": [
"LATENT"
],
"name": "LatentUpscaleBy",
"display_name": "Upscale Latent By",
"description": "",
"python_module": "nodes",
"category": "latent",
"output_node": false
}
}
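The schema above mirrors what a running ComfyUI instance returns from its GET /object_info endpoint. As a minimal sketch (the fetchObjectInfo helper is hypothetical and not part of this commit), the same data can be fetched live and later handed to the PromptBuilder.validate call introduced below:

import 'dart:convert';

import 'package:http/http.dart' as http;

/// Hypothetical helper: fetches the node schema from a live ComfyUI server.
/// This commit ships a static example file instead.
Future<Map<String, dynamic>> fetchObjectInfo(String host) async {
  final response = await http.get(Uri.parse('$host/object_info'));
  if (response.statusCode != 200) {
    throw Exception('Failed to fetch object_info: HTTP ${response.statusCode}');
  }
  return jsonDecode(response.body) as Map<String, dynamic>;
}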

lib/comfyui_api.dart (library exports)

@@ -4,3 +4,4 @@ export 'src/comfyui_api.dart';
 export 'src/models/websocket_event.dart';
 export 'src/models/progress_event.dart';
 export 'src/models/execution_event.dart';
+export 'src/prompt_builder.dart';
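With this export in place, consumers reach PromptBuilder through the package's single entry point; assuming the package is named comfyui_api (inferred from the export paths, not stated in the diff):

import 'package:comfyui_api/comfyui_api.dart';

final builder = PromptBuilder(clientId: 'example-client');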

lib/src/comfyui_api.dart

@@ -2,8 +2,8 @@ import 'dart:async';
 import 'dart:convert';
 import 'package:http/http.dart' as http;
 import 'package:uuid/uuid.dart';
+import 'package:logger/logger.dart';
 import 'models/websocket_event.dart';
 import 'models/progress_event.dart';
 import 'models/execution_event.dart';
@@ -22,14 +22,17 @@ class ComfyUiApi {
   final String clientId;
   final http.Client _httpClient;
   final WebSocketManager _webSocketManager;
+  final Logger logger;

   /// Creates a new ComfyUI API client
   ComfyUiApi({
     required this.host,
     required this.clientId,
+    Logger? logger,
     http.Client? httpClient,
   }) : _httpClient = httpClient ?? http.Client(),
-       _webSocketManager = WebSocketManager(host: host, clientId: clientId);
+       _webSocketManager = WebSocketManager(host: host, clientId: clientId),
+       logger = logger ?? Logger();

   /// Expose WebSocketManager streams and methods
   Stream<WebSocketEvent> get events => _webSocketManager.events;
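Since the new logger parameter is optional and falls back to Logger(), existing call sites compile unchanged, while callers can inject a tuned instance. A minimal usage sketch (the host and client ID values are illustrative, not from this commit):

import 'package:logger/logger.dart';

void main() {
  final api = ComfyUiApi(
    host: 'http://127.0.0.1:8188', // illustrative local ComfyUI address
    clientId: 'example-client',
    // Optional: inject a configured logger; if omitted, Logger() is used.
    logger: Logger(level: Level.info, printer: PrettyPrinter()),
  );
  api.logger.i('ComfyUI client ready for ${api.host}');
}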

lib/src/prompt_builder.dart (new file)

@@ -0,0 +1,89 @@
/// Assembles a ComfyUI prompt graph node by node, resolving named output
/// tags into the [nodeId, outputIndex] references the server expects.
class PromptBuilder {
  final String clientId;
  final Map<String, dynamic> _nodes = {};

  /// Maps output tags (e.g. "MODEL" or a custom name) to the providing node.
  final Map<String, Map<String, String>> _outputToNode = {};

  int _nodeIdCounter = 1;

  PromptBuilder({required this.clientId});

  /// Adds a node to the workflow. Any input of the form [tag, outputIndex]
  /// is rewritten to reference the node registered under that tag, and this
  /// node's own outputs are registered afterwards (renamed via [outputTags]
  /// if given, so two nodes of the same type don't collide).
  void addNode(String classType, Map<String, dynamic> inputs,
      {String? title, Map<String, String>? outputTags}) {
    final nodeId = _nodeIdCounter.toString();

    // Resolve dependencies for inputs.
    inputs.forEach((key, value) {
      if (value is List && value.length == 2 && value[1] is int) {
        final outputTag = value[0];
        if (_outputToNode.containsKey(outputTag)) {
          inputs[key] = [_outputToNode[outputTag]!["nodeId"], value[1]];
        } else {
          throw Exception(
              "Unresolved dependency: No node provides output tag '$outputTag'");
        }
      }
    });

    _nodes[nodeId] = {
      "inputs": inputs,
      "class_type": classType,
      "_meta": {"title": title ?? classType}
    };

    // Register outputs of this node with optional tags.
    final defaultOutputs = _getDefaultOutputs(classType);
    for (var i = 0; i < defaultOutputs.length; i++) {
      final outputTag = outputTags?[defaultOutputs[i]] ?? defaultOutputs[i];
      _outputToNode[outputTag] = {
        "nodeId": nodeId,
        "outputIndex": i.toString()
      };
    }

    _nodeIdCounter++;
  }

  /// Generates the final workflow map.
  Map<String, dynamic> build() {
    return {
      "client_id": clientId,
      "prompt": _nodes,
    };
  }

  /// Validates the workflow against object info (optional).
  void validate(Map<String, dynamic> objectInfo) {
    for (var nodeId in _nodes.keys) {
      final node = _nodes[nodeId];
      final classType = node["class_type"];
      if (!objectInfo.containsKey(classType)) {
        throw Exception("Invalid node class type: $classType");
      }
      final requiredInputs = objectInfo[classType]["input"]["required"];
      for (var inputKey in requiredInputs.keys) {
        if (!node["inputs"].containsKey(inputKey)) {
          throw Exception(
              "Missing required input '$inputKey' for node $nodeId");
        }
      }
    }
  }

  /// Helper method to get default outputs for a class type.
  List<String> _getDefaultOutputs(String classType) {
    if (classType == "CheckpointLoaderSimple") {
      return ["MODEL", "CLIP", "VAE"];
    } else if (classType == "CLIPTextEncode") {
      return ["CONDITIONING"];
    } else if (classType == "EmptyLatentImage" ||
        classType == "KSampler" ||
        classType == "LatentUpscale" ||
        classType == "LatentUpscaleBy") {
      return ["LATENT"];
    } else if (classType == "VAEDecode") {
      return ["IMAGE"];
    }
    return [];
  }
}
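Putting it together, a basic text-to-image graph might be assembled as follows. This is a usage sketch, not code from the commit; the checkpoint, sampler, and scheduler names are taken from example/object_info.json above, and the resulting map is what would be POSTed to ComfyUI's /prompt endpoint:

void main() {
  final builder = PromptBuilder(clientId: 'example-client');

  // Node 1 registers the MODEL, CLIP and VAE output tags.
  builder.addNode('CheckpointLoaderSimple', {
    'ckpt_name': 'v1-5-pruned-emaonly.ckpt',
  });

  // Rename each encoder's CONDITIONING tag so the second node
  // does not overwrite the first one's registration.
  builder.addNode('CLIPTextEncode', {
    'text': 'a watercolor painting of a lighthouse',
    'clip': ['CLIP', 1], // resolved to node 1, output index 1
  }, outputTags: {'CONDITIONING': 'POSITIVE'});
  builder.addNode('CLIPTextEncode', {
    'text': 'blurry, low quality',
    'clip': ['CLIP', 1],
  }, outputTags: {'CONDITIONING': 'NEGATIVE'});

  builder.addNode('EmptyLatentImage', {
    'width': 512,
    'height': 512,
    'batch_size': 1,
  });

  // KSampler consumes the empty LATENT, then re-registers the tag itself,
  // so the VAEDecode below resolves to the sampler's output.
  builder.addNode('KSampler', {
    'model': ['MODEL', 0],
    'seed': 42,
    'steps': 20,
    'cfg': 8.0,
    'sampler_name': 'euler',
    'scheduler': 'normal',
    'positive': ['POSITIVE', 0],
    'negative': ['NEGATIVE', 0],
    'latent_image': ['LATENT', 0],
    'denoise': 1.0,
  });

  builder.addNode('VAEDecode', {
    'samples': ['LATENT', 0],
    'vae': ['VAE', 2], // node 1's third output
  });

  final prompt = builder.build(); // {"client_id": ..., "prompt": {...}}
}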

pubspec.lock

@@ -281,6 +281,14 @@ packages:
       url: "https://pub.dev"
     source: hosted
     version: "5.1.1"
+  logger:
+    dependency: "direct main"
+    description:
+      name: logger
+      sha256: be4b23575aac7ebf01f225a241eb7f6b5641eeaf43c6a8613510fc2f8cf187d1
+      url: "https://pub.dev"
+    source: hosted
+    version: "2.5.0"
   logging:
     dependency: transitive
     description:

pubspec.yaml

@@ -11,6 +11,7 @@ dependencies:
   uuid: ^4.5.1
   json_annotation: ^4.9.0
   freezed_annotation: ^3.0.0
+  logger: ^2.5.0

 dev_dependencies:
   lints: ^5.1.1