The 16th iThome Ironman Challenge (2023)
MLSD works much like the HED model, but because it keeps only straight line segments, its regenerated samples are noticeably less faithful than HED's. The difference is obvious in the image below: apart from the overall room layout, the individual objects come out almost completely different. The pipeline code is nearly identical to the HED example; only the detector and the ControlNet checkpoint change:
```python
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
from controlnet_aux import MLSDdetector
from diffusers.utils import load_image

# Load the M-LSD line-segment annotator and extract the line map from the source photo
mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')
image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-mlsd/resolve/main/images/room.png")
image = mlsd(image)

# Load the MLSD ControlNet and attach it to a Stable Diffusion 1.5 pipeline
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-mlsd", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Remove if you do not have xformers installed
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
# for installation instructions
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()

# Generate an image conditioned on the line map and save the result
image = pipe("room", image, num_inference_steps=20).images[0]
image.save('images/room_mlsd_out.png')
```
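
To make the HED-versus-MLSD comparison above concrete, the two detectors can be run on the same source photo and their maps saved side by side. This is a minimal sketch using controlnet_aux: the HEDdetector usage mirrors the MLSD code above, and the output filenames are arbitrary.

```python
from controlnet_aux import HEDdetector, MLSDdetector
from diffusers.utils import load_image

source = load_image("https://huggingface.co/lllyasviel/sd-controlnet-mlsd/resolve/main/images/room.png")

# HED produces soft, dense edges; MLSD keeps only straight line segments,
# which is why fewer object details survive into the regenerated image.
hed = HEDdetector.from_pretrained('lllyasviel/ControlNet')
mlsd = MLSDdetector.from_pretrained('lllyasviel/ControlNet')

hed(source).save('images/room_hed_map.png')
mlsd(source).save('images/room_mlsd_map.png')
```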
Reference: https://huggingface.co/lllyasviel/sd-controlnet-mlsd