mirror of
https://github.com/finegrain-ai/refiners.git
synced 2024-11-09 23:12:02 +00:00
rollback to 50 inference steps in IP-Adapter tests
Follow up of 8a36c8c
This is what is used in the official notebook (ip_adapter_demo.ipynb and
ip_adapter-plus_demo.ipynb)
This commit is contained in:
parent: ed3621362f · commit: 40c33b9595
|
@@ -1236,7 +1236,7 @@ def test_diffusion_ip_adapter(
     clip_image_embedding = ip_adapter.compute_clip_image_embedding(ip_adapter.preprocess_image(woman_image))
     ip_adapter.set_clip_image_embedding(clip_image_embedding)

-    sd15.set_inference_steps(30)
+    sd15.set_inference_steps(50)

     manual_seed(2)
     x = torch.randn(1, 4, 64, 64, device=test_device, dtype=torch.float16)

@@ -1382,7 +1382,7 @@ def test_diffusion_ip_adapter_plus(
     clip_image_embedding = ip_adapter.compute_clip_image_embedding(ip_adapter.preprocess_image(statue_image))
     ip_adapter.set_clip_image_embedding(clip_image_embedding)

-    sd15.set_inference_steps(30)
+    sd15.set_inference_steps(50)

     manual_seed(42)  # seed=42 is used in the official IP-Adapter demo
     x = torch.randn(1, 4, 64, 64, device=test_device, dtype=torch.float16)

Loading…
Reference in a new issue