/// Incrementally builds a ComfyUI prompt (workflow) graph. Node outputs are
/// registered under string tags so that later nodes can reference them by tag
/// in their inputs; the tags are resolved to node IDs in [build].
class PromptBuilder {
  String clientId;

  /// nodeId -> node definition ({"inputs", "class_type", "_meta"}).
  final Map<String, dynamic> _nodes = {};

  /// output tag -> {"nodeId", "outputIndex"} of the node producing that output.
  final Map<String, Map<String, dynamic>> _outputToNode = {};

  int _nodeIdCounter = 1;

  PromptBuilder({required this.clientId});

  /// Serializes the builder state (nodes, output tags and the ID counter).
  Map<String, dynamic> toJson() {
    return {
      "nodes": _nodes,
      "outputToNode": _outputToNode,
      "nodeIdCounter": _nodeIdCounter
    };
  }

  /// Restores the builder state from a map previously produced by [toJson].
  void fromJson(Map<String, dynamic> json) {
    _nodes.clear();
    _outputToNode.clear();
    _nodeIdCounter = json["nodeIdCounter"];

    for (var entry in json["nodes"].entries) {
      _nodes[entry.key] = entry.value;
    }

    for (var entry in json["outputToNode"].entries) {
      _outputToNode[entry.key] = entry.value;
    }
  }
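
  // Round-trip sketch (illustrative, not part of the original file): the
  // builder state can be persisted with dart:convert, e.g.
  //   final saved = jsonEncode(builder.toJson());
  //   other.fromJson(jsonDecode(saved) as Map<String, dynamic>);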

  /// Adds a node to the workflow and returns its nodeId.
  String addNode(String classType, Map<String, dynamic> inputs,
      {String? title, Map<String, String>? outputTags}) {
    final nodeId = _nodeIdCounter.toString();

    _nodes[nodeId] = {
      "inputs": inputs,
      "class_type": classType,
      "_meta": {"title": title ?? classType}
    };

    // Register the outputs of this node under tags. By default the tag is the
    // output name itself (e.g. "MODEL"); outputTags can map an output name to
    // a custom tag. A later node registering the same tag overwrites the
    // earlier mapping.
    final defaultOutputs = _getDefaultOutputs(classType);
    for (var i = 0; i < defaultOutputs.length; i++) {
      final outputTag = outputTags?[defaultOutputs[i]] ?? defaultOutputs[i];
      _outputToNode[outputTag] = {
        "nodeId": nodeId,
        "outputIndex": i.toString()
      };
    }

    _nodeIdCounter++;
    return nodeId;
  }
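
  // Illustrative note on addNode (not part of the original file): adding a
  // CheckpointLoaderSimple registers the tags "MODEL", "CLIP" and "VAE", so a
  // later CLIPTextEncode node can reference the loader's CLIP output as
  //
  //   builder.addNode("CLIPTextEncode",
  //       {"text": "a photo of a cat", "clip": ["CLIP", 1]});
  //
  // where 1 is the CLIP output slot; build() rewrites ["CLIP", 1] to
  // [<loader nodeId>, 1].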

  /// Provides an unmodifiable copy of the current nodes.
  Map<String, dynamic> get nodes => Map.unmodifiable(_nodes);

  /// Edits an existing node in the workflow.
  void editNode(String nodeId,
      {Map<String, dynamic>? newInputs, String? newTitle}) {
    if (!_nodes.containsKey(nodeId)) {
      throw Exception("Node with ID $nodeId does not exist.");
    }

    final node = _nodes[nodeId];
    if (newInputs != null) {
      node["inputs"] = newInputs; // Inputs will be resolved in build()
    }

    if (newTitle != null) {
      node["_meta"]["title"] = newTitle;
    }
  }

  /// Removes a node from the workflow.
  void removeNode(String nodeId) {
    if (!_nodes.containsKey(nodeId)) {
      throw Exception("Node with ID $nodeId does not exist.");
    }

    // Remove the node
    _nodes.remove(nodeId);

    // Remove associated outputs
    _outputToNode.removeWhere((_, value) => value["nodeId"] == nodeId);
  }

  /// Reorders nodes in the workflow.
  void reorderNodes(List<String> newOrder) {
    // Reject orders that drop, duplicate or invent node IDs.
    if (newOrder.length != _nodes.length ||
        newOrder.toSet().length != newOrder.length ||
        !newOrder.every((id) => _nodes.containsKey(id))) {
      throw Exception(
          "Invalid new order: all node IDs must be included exactly once.");
    }

    final reorderedNodes = <String, dynamic>{};
    for (var nodeId in newOrder) {
      reorderedNodes[nodeId] = _nodes[nodeId];
    }
    _nodes
      ..clear()
      ..addAll(reorderedNodes);
  }

  /// Generates the final workflow map in the form
  /// {"client_id": ..., "prompt": {nodeId: {...}}}. Inputs of the form
  /// [outputTag, outputIndex] are rewritten to [providerNodeId, outputIndex].
  Map<String, dynamic> build() {
    final resolvedNodes = <String, dynamic>{};

    _nodes.forEach((nodeId, node) {
      final resolvedInputs = <String, dynamic>{};

      // Resolve dependencies for inputs: a two-element list whose second
      // element is an int is treated as a reference to another node's output.
      node["inputs"].forEach((key, value) {
        if (value is List && value.length == 2 && value[1] is int) {
          final outputTag = value[0];
          if (_outputToNode.containsKey(outputTag)) {
            resolvedInputs[key] = [
              _outputToNode[outputTag]!["nodeId"],
              value[1]
            ];
          } else {
            throw Exception(
                "Unresolved dependency: No node provides output tag '$outputTag'");
          }
        } else {
          resolvedInputs[key] = value;
        }
      });

      resolvedNodes[nodeId] = {
        ...node,
        "inputs": resolvedInputs,
      };
    });

    return {
      "client_id": clientId,
      "prompt": resolvedNodes,
    };
  }
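
  // Illustrative example of the resolution performed by build() (not part of
  // the original file): if node "1" registered the "CLIP" tag, an input entry
  // {"clip": ["CLIP", 1]} is emitted as {"clip": ["1", 1]}, and the returned
  // map matches the payload shape of ComfyUI's /prompt endpoint.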

  /// Validates the workflow against object info (optional).
  void validate(Map<String, dynamic> objectInfo) {
    for (var nodeId in _nodes.keys) {
      final node = _nodes[nodeId];
      final classType = node["class_type"];
      if (!objectInfo.containsKey(classType)) {
        throw Exception("Invalid node class type: $classType");
      }
      final requiredInputs = objectInfo[classType]["input"]["required"];
      for (var inputKey in requiredInputs.keys) {
        if (!node["inputs"].containsKey(inputKey)) {
          throw Exception(
              "Missing required input '$inputKey' for node $nodeId");
        }
      }
    }
  }
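
  // Note on validate() above (illustrative, not part of the original file):
  // objectInfo is expected to follow the shape of ComfyUI's /object_info
  // response, e.g. (abridged)
  //   {"KSampler": {"input": {"required": {"model": [...], "seed": [...]}}}}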

  /// Default output names per node class type, in output-slot order. Used by
  /// [addNode] to register output tags; unknown class types have no outputs.
  static const Map<String, List<String>> _defaultOutputsByClassType = {
    "KSampler": ["LATENT"],
    "CheckpointLoaderSimple": ["MODEL", "CLIP", "VAE"],
    "CLIPTextEncode": ["CONDITIONING"],
    "CLIPSetLastLayer": ["CLIP"],
    "VAEDecode": ["IMAGE"],
    "VAEEncode": ["LATENT"],
    "VAEEncodeForInpaint": ["LATENT"],
    "VAELoader": ["VAE"],
    "EmptyLatentImage": ["LATENT"],
    "LatentUpscale": ["LATENT"],
    "LatentUpscaleBy": ["LATENT"],
    "LatentFromBatch": ["LATENT"],
    "RepeatLatentBatch": ["LATENT"],
    "SaveImage": [],
    "PreviewImage": [],
    "LoadImage": ["IMAGE", "MASK"],
    "LoadImageMask": ["MASK"],
    "LoadImageOutput": ["IMAGE", "MASK"],
    "ImageScale": ["IMAGE"],
    "ImageScaleBy": ["IMAGE"],
    "ImageInvert": ["IMAGE"],
    "ImageBatch": ["IMAGE"],
    "ImagePadForOutpaint": ["IMAGE", "MASK"],
    "EmptyImage": ["IMAGE"],
    "ConditioningAverage": ["CONDITIONING"],
    "ConditioningCombine": ["CONDITIONING"],
    "ConditioningConcat": ["CONDITIONING"],
    "ConditioningSetArea": ["CONDITIONING"],
    "ConditioningSetAreaPercentage": ["CONDITIONING"],
    "ConditioningSetAreaStrength": ["CONDITIONING"],
    "ConditioningSetMask": ["CONDITIONING"],
    "KSamplerAdvanced": ["LATENT"],
    "SetLatentNoiseMask": ["LATENT"],
    "LatentComposite": ["LATENT"],
    "LatentBlend": ["LATENT"],
    "LatentRotate": ["LATENT"],
    "LatentFlip": ["LATENT"],
    "LatentCrop": ["LATENT"],
    "LoraLoader": ["MODEL", "CLIP"],
    "CLIPLoader": ["CLIP"],
    "UNETLoader": ["MODEL"],
    "DualCLIPLoader": ["CLIP"],
    "CLIPVisionEncode": ["CLIP_VISION_OUTPUT"],
    "StyleModelApply": ["CONDITIONING"],
    "StyleModelLoader": ["STYLE_MODEL"],
    "CLIPVisionLoader": ["CLIP_VISION"],
    "VAEDecodeTiled": ["IMAGE"],
    "VAEEncodeTiled": ["LATENT"],
    "unCLIPCheckpointLoader": ["MODEL", "CLIP", "VAE", "CLIP_VISION"],
    "GLIGENLoader": ["GLIGEN"],
    "GLIGENTextBoxApply": ["CONDITIONING"],
    "InpaintModelConditioning": ["positive", "negative", "latent"],
    "LatentAdd": ["LATENT"],
    "LatentSubtract": ["LATENT"],
    "LatentMultiply": ["LATENT"],
    "LatentInterpolate": ["LATENT"],
    "LatentBatch": ["LATENT"],
    "LatentBatchSeedBehavior": ["LATENT"],
    "LatentApplyOperation": ["LATENT"],
    "LatentApplyOperationCFG": ["MODEL"],
    "LatentOperationTonemapReinhard": ["LATENT_OPERATION"],
    "LatentOperationSharpen": ["LATENT_OPERATION"],
    "HypernetworkLoader": ["MODEL"],
    "UpscaleModelLoader": ["UPSCALE_MODEL"],
    "ImageUpscaleWithModel": ["IMAGE"],
    "ImageBlend": ["IMAGE"],
    "ImageBlur": ["IMAGE"],
    "ImageQuantize": ["IMAGE"],
    "ImageSharpen": ["IMAGE"],
    "ImageScaleToTotalPixels": ["IMAGE"],
    "LatentCompositeMasked": ["LATENT"],
    "ImageCompositeMasked": ["IMAGE"],
    "MaskToImage": ["IMAGE"],
    "ImageToMask": ["MASK"],
    "ImageColorToMask": ["MASK"],
    "SolidMask": ["MASK"],
    "InvertMask": ["MASK"],
    "CropMask": ["MASK"],
    "MaskComposite": ["MASK"],
    "FeatherMask": ["MASK"],
    "GrowMask": ["MASK"],
    "ThresholdMask": ["MASK"],
    "PorterDuffImageComposite": ["IMAGE", "MASK"],
    "SplitImageWithAlpha": ["IMAGE", "MASK"],
    "JoinImageWithAlpha": ["IMAGE"],
    "RebatchLatents": ["LATENT"],
    "RebatchImages": ["IMAGE"],
    "ModelMergeSimple": ["MODEL"],
    "ModelMergeBlocks": ["MODEL"],
    "ModelMergeSubtract": ["MODEL"],
    "ModelMergeAdd": ["MODEL"],
    "CheckpointSave": [],
    "CLIPSave": [],
    "VAESave": [],
    "ModelSave": [],
    "TomePatchModel": ["MODEL"],
    "CLIPTextEncodeSDXLRefiner": ["CONDITIONING"],
    "CLIPTextEncodeSDXL": ["CONDITIONING"],
    "Canny": ["IMAGE"],
    "FreeU": ["MODEL"],
    "FreeU_V2": ["MODEL"],
    "SamplerCustom": ["LATENT", "LATENT"],
    "BasicScheduler": ["SIGMAS"],
    "KarrasScheduler": ["SIGMAS"],
    "ExponentialScheduler": ["SIGMAS"],
    "PolyexponentialScheduler": ["SIGMAS"],
    "LaplaceScheduler": ["SIGMAS"],
    "VPScheduler": ["SIGMAS"],
    "BetaSamplingScheduler": ["SIGMAS"],
    "SDTurboScheduler": ["SIGMAS"],
    "KSamplerSelect": ["SAMPLER"],
    "SamplerEulerAncestral": ["SAMPLER"],
    "SamplerEulerAncestralCFGPP": ["SAMPLER"],
    "SamplerLMS": ["SAMPLER"],
    "SamplerDPMPP_3M_SDE": ["SAMPLER"],
    "SamplerDPMPP_2M_SDE": ["SAMPLER"],
    "SamplerDPMPP_SDE": ["SAMPLER"],
    "SamplerDPMPP_2S_Ancestral": ["SAMPLER"],
    "SamplerDPMAdaptative": ["SAMPLER"],
    "SplitSigmas": ["SIGMAS", "SIGMAS"],
    "SplitSigmasDenoise": ["SIGMAS", "SIGMAS"],
    "FlipSigmas": ["SIGMAS"],
    "SetFirstSigma": ["SIGMAS"],
    "CFGGuider": ["GUIDER"],
  };

  /// Helper method to get default outputs for a class type.
  List<String> _getDefaultOutputs(String classType) =>
      _defaultOutputsByClassType[classType] ?? const [];
}
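
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original API). It wires a
// minimal text-to-image prompt; the checkpoint name, prompt texts and sampler
// settings are placeholder assumptions, and the input field names follow the
// standard ComfyUI nodes listed in _defaultOutputsByClassType.
// ---------------------------------------------------------------------------
Map<String, dynamic> buildExamplePrompt() {
  final builder = PromptBuilder(clientId: "example-client");

  // Registers the "MODEL", "CLIP" and "VAE" output tags.
  builder.addNode(
      "CheckpointLoaderSimple", {"ckpt_name": "model.safetensors"});

  // Positive and negative prompts; custom tags keep the two CONDITIONING
  // outputs distinguishable.
  builder.addNode(
      "CLIPTextEncode", {"text": "a scenic landscape", "clip": ["CLIP", 1]},
      outputTags: {"CONDITIONING": "POSITIVE"});
  builder.addNode(
      "CLIPTextEncode", {"text": "blurry, low quality", "clip": ["CLIP", 1]},
      outputTags: {"CONDITIONING": "NEGATIVE"});

  builder.addNode(
      "EmptyLatentImage", {"width": 512, "height": 512, "batch_size": 1});

  // The sampler consumes the model, both conditionings and the empty latent;
  // tagging its output "SAMPLED" avoids clobbering the "LATENT" tag above.
  builder.addNode("KSampler", {
    "model": ["MODEL", 0],
    "positive": ["POSITIVE", 0],
    "negative": ["NEGATIVE", 0],
    "latent_image": ["LATENT", 0],
    "seed": 42,
    "steps": 20,
    "cfg": 7.0,
    "sampler_name": "euler",
    "scheduler": "normal",
    "denoise": 1.0,
  }, outputTags: {"LATENT": "SAMPLED"});

  // Decode the sampled latent and save the image.
  builder.addNode(
      "VAEDecode", {"samples": ["SAMPLED", 0], "vae": ["VAE", 2]});
  builder.addNode(
      "SaveImage", {"images": ["IMAGE", 0], "filename_prefix": "example"});

  return builder.build();
}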