# The only extra line you need: wrap the pipeline's UNet in a mutable
# Torch-TensorRT module so it is compiled to TensorRT and can be refitted
# later when its weights change (e.g. after a LoRA load/unload).
pipe.unet = torch_trt.MutableTorchTensorRTModule(pipe.unet, **settings)
96 |
# Declare the dynamic-shape envelope the compiled UNet must accept, so calls
# whose shapes stay inside these ranges reuse the engine instead of forcing a
# recompilation.
# NOTE(review): min=2 / max=24 presumably accounts for classifier-free
# guidance doubling the batch (1..12 images x 2) -- confirm with the pipeline.
BATCH = torch.export.Dim("BATCH", min=2, max=24)
_HEIGHT = torch.export.Dim("_HEIGHT", min=16, max=32)
_WIDTH = torch.export.Dim("_WIDTH", min=16, max=32)
# Derived dims: the latent height/width fed to the UNet are 4x the base dims,
# which keeps them multiples of 4 over the whole allowed range.
HEIGHT = 4 * _HEIGHT
WIDTH = 4 * _WIDTH

# Positional UNet input (the latent sample): dims 0/2/3 are dynamic.
args_dynamic_shapes = ({0: BATCH, 2: HEIGHT, 3: WIDTH}, {})
# Keyword UNet inputs that share the dynamic batch dimension.
kwargs_dynamic_shapes = {
    "encoder_hidden_states": {0: BATCH},
    "added_cond_kwargs": {
        "text_embeds": {0: BATCH},
        "time_ids": {0: BATCH},
    },
}
pipe.unet.set_expected_dynamic_shape_range(
    args_dynamic_shapes, kwargs_dynamic_shapes
)
# First inference, before any LoRA is loaded -- this call triggers the
# initial TensorRT compilation of the wrapped UNet.
generation = pipe(
    prompt,
    negative_prompt=negative,
    num_inference_steps=30,
    height=1024,
    width=768,
    num_images_per_prompt=2,
)
image = generation.images[0]
image.save("./without_LoRA_mutable.jpg")
|
99 | 121 |
|
100 | 122 | # Standard Huggingface LoRA loading procedure
|
|
# Remove the LoRA weights loaded above (standard Hugging Face diffusers API),
# mutating the UNet's weights once more.
pipe.unload_lora_weights()
|
# The LoRA load/unload above changed the UNet's weights, so this call
# triggers a refit of the existing TensorRT engine rather than a rebuild.
generation = pipe(
    prompt,
    negative_prompt=negative,
    num_inference_steps=30,
    height=1024,
    width=1024,
    num_images_per_prompt=1,
)
image = generation.images[0]
image.save("./with_LoRA_mutable.jpg")
|
113 | 142 |
|
114 | 143 |
|
|
0 commit comments