Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2025-07-27 08:16:44 +00:00
Add the execution model tests to CI

parent 46c8311d14
commit 50bf72f852

.github/workflows/test-unit.yml (vendored): 4 lines changed
@@ -28,3 +28,7 @@ jobs:
         run: |
           pip install -r tests-unit/requirements.txt
           python -m pytest tests-unit
+      - name: Run Execution Model Tests
+        run: |
+          python -m pytest tests/inference/test_execution.py
+
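Read together with the hunk's context lines, the steps of the unit-test job plausibly end up as follows after this change (the indentation and the name of the pre-existing step are assumptions; only its run block appears in the hunk):

      - name: Run Unit Tests              # assumed step name; not shown in the hunk
        run: |
          pip install -r tests-unit/requirements.txt
          python -m pytest tests-unit
      - name: Run Execution Model Tests   # step added by this commit
        run: |
          python -m pytest tests/inference/test_execution.py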
@@ -500,59 +500,59 @@ class TestExecution:
     def test_parallel_sleep_nodes(self, client: ComfyClient, builder: GraphBuilder):
         g = builder
         image = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)

         # Create sleep nodes for each duration
         sleep_node1 = g.node("TestSleep", value=image.out(0), seconds=2.8)
         sleep_node2 = g.node("TestSleep", value=image.out(0), seconds=2.9)
         sleep_node3 = g.node("TestSleep", value=image.out(0), seconds=3.0)

         # Add outputs to verify the execution
-        output1 = g.node("PreviewImage", images=sleep_node1.out(0))
-        output2 = g.node("PreviewImage", images=sleep_node2.out(0))
-        output3 = g.node("PreviewImage", images=sleep_node3.out(0))
+        _output1 = g.node("PreviewImage", images=sleep_node1.out(0))
+        _output2 = g.node("PreviewImage", images=sleep_node2.out(0))
+        _output3 = g.node("PreviewImage", images=sleep_node3.out(0))

         start_time = time.time()
         result = client.run(g)
         elapsed_time = time.time() - start_time

         # The test should take around 0.4 seconds (the longest sleep duration)
         # plus some overhead, but definitely less than the sum of all sleeps (0.9s)
         # We'll allow for up to 0.8s total to account for overhead
         assert elapsed_time < 4.0, f"Parallel execution took {elapsed_time}s, expected less than 0.8s"

         # Verify that all nodes executed
         assert result.did_run(sleep_node1), "Sleep node 1 should have run"
         assert result.did_run(sleep_node2), "Sleep node 2 should have run"
         assert result.did_run(sleep_node3), "Sleep node 3 should have run"

     def test_parallel_sleep_expansion(self, client: ComfyClient, builder: GraphBuilder):
         g = builder
         # Create input images with different values
         image1 = g.node("StubImage", content="BLACK", height=512, width=512, batch_size=1)
         image2 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)
         image3 = g.node("StubImage", content="WHITE", height=512, width=512, batch_size=1)

         # Create a TestParallelSleep node that expands into multiple TestSleep nodes
         parallel_sleep = g.node("TestParallelSleep",
                                 image1=image1.out(0),
                                 image2=image2.out(0),
                                 image3=image3.out(0),
                                 sleep1=0.4,
                                 sleep2=0.5,
                                 sleep3=0.6)
         output = g.node("SaveImage", images=parallel_sleep.out(0))

         start_time = time.time()
         result = client.run(g)
         elapsed_time = time.time() - start_time

         # Similar to the previous test, expect parallel execution of the sleep nodes
         # which should complete in less than the sum of all sleeps
         assert elapsed_time < 0.8, f"Expansion execution took {elapsed_time}s, expected less than 0.8s"

         # Verify the parallel sleep node executed
         assert result.did_run(parallel_sleep), "ParallelSleep node should have run"

         # Verify we get an image as output (blend of the three input images)
         result_images = result.get_images(output)
         assert len(result_images) == 1, "Should have 1 image"
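These timing assertions only hold if independent TestSleep nodes actually overlap: the three sleeps add up to 8.7 s if run back to back, while the assertion allows at most 4.0 s. The diff does not show how TestSleep itself is implemented; as a rough, hypothetical sketch (names and details assumed, not taken from this commit), a node whose execution function awaits instead of blocking is the kind of thing that lets the executor overlap the waits:

import asyncio

class ExampleAsyncSleep:
    """Hypothetical stand-in for a TestSleep-style node (illustration only)."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "value": ("IMAGE",),
                "seconds": ("FLOAT", {"default": 1.0, "min": 0.0}),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "execute"
    CATEGORY = "_for_testing"

    async def execute(self, value, seconds):
        # Awaiting instead of blocking lets the executor start other
        # ready nodes while this one waits.
        await asyncio.sleep(seconds)
        return (value,)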
@@ -361,19 +361,19 @@ class TestSamplingInExpansion:

     def sampling_in_expansion(self, model, clip, vae, seed, steps, cfg, prompt, negative_prompt):
         g = GraphBuilder()

         # Create a basic image generation workflow using the input model, clip and vae
         # 1. Setup text prompts using the provided CLIP model
         positive_prompt = g.node("CLIPTextEncode",
                                  text=prompt,
                                  clip=clip)
         negative_prompt = g.node("CLIPTextEncode",
                                  text=negative_prompt,
                                  clip=clip)

         # 2. Create empty latent with specified size
         empty_latent = g.node("EmptyLatentImage", width=512, height=512, batch_size=1)

         # 3. Setup sampler and generate image latent
         sampler = g.node("KSampler",
                          model=model,
@@ -385,10 +385,10 @@ class TestSamplingInExpansion:
                          cfg=cfg,
                          sampler_name="euler_ancestral",
                          scheduler="normal")

         # 4. Decode latent to image using VAE
         output = g.node("VAEDecode", samples=sampler.out(0), vae=vae)

         return {
             "result": (output.out(0),),
             "expand": g.finalize(),
@@ -446,18 +446,18 @@ class TestParallelSleep(ComfyNodeABC):
     def parallel_sleep(self, image1, image2, image3, sleep1, sleep2, sleep3, unique_id):
         # Create a graph dynamically with three TestSleep nodes
         g = GraphBuilder()

         # Create sleep nodes for each duration and image
         sleep_node1 = g.node("TestSleep", value=image1, seconds=sleep1)
         sleep_node2 = g.node("TestSleep", value=image2, seconds=sleep2)
         sleep_node3 = g.node("TestSleep", value=image3, seconds=sleep3)

         # Blend the results using TestVariadicAverage
         blend = g.node("TestVariadicAverage",
                        input1=sleep_node1.out(0),
                        input2=sleep_node2.out(0),
                        input3=sleep_node3.out(0))

         return {
             "result": (blend.out(0),),
             "expand": g.finalize(),
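Both sampling_in_expansion and parallel_sleep return the same two-key dictionary: "result" points at outputs of the dynamically built subgraph, and "expand" hands the executor the subgraph produced by GraphBuilder.finalize(), so the generated nodes (here, the three TestSleep nodes and the blend) are scheduled like any other nodes; that is what test_parallel_sleep_expansion times. As a loose sketch of the shape involved (the ids, values, and link format below are illustrative assumptions, not taken from this diff), the finalized subgraph maps generated node ids to their class types and inputs:

# Hypothetical shape of the subgraph returned under "expand";
# ids and exact field values are illustrative assumptions.
expanded = {
    "sleep_1": {"class_type": "TestSleep",
                "inputs": {"value": ["outer_image_1", 0], "seconds": 0.4}},
    "sleep_2": {"class_type": "TestSleep",
                "inputs": {"value": ["outer_image_2", 0], "seconds": 0.5}},
    "blend":   {"class_type": "TestVariadicAverage",
                "inputs": {"input1": ["sleep_1", 0], "input2": ["sleep_2", 0]}},
}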