example.py
import math
import os

# Requires a recent diffusers release that ships the Qwen image-edit pipelines.
from diffusers import (
    FlowMatchEulerDiscreteScheduler,
    QwenImageEditInpaintPipeline,
    QwenImageEditPipeline,
)

# The methods below are taken from a larger model-manager class; vae_model,
# transformer_model, qwen2_5vl_model, balance_loras, logger, ProgressBar and the
# ACCELERATOR_LORA_NAME_* constants are defined elsewhere in that project.
def load_models(self, loras: list = None, use_accelerator: bool = False, accelerator_steps: int = 8, for_inpainting: bool = False):
    # Reload only if a component is missing or a requested setting changed.
    modified = any([
        self.transformer is None,
        self.vae is None,
        self.text_encoder is None,
        self.tokenizer is None,
        self.processor is None,
        self.scheduler is None,
        self.pipe is None,
        self.loras != loras,
        self.for_inpainting != for_inpainting,
        self.use_accelerator != use_accelerator,
        self.accelerator_steps != accelerator_steps
    ])
    if not modified:
        return
    vae = vae_model.model
    if loras == self.loras and self.use_accelerator == use_accelerator and self.accelerator_steps == accelerator_steps and self.for_inpainting == for_inpainting:
        # Same LoRA/accelerator configuration: reuse the already-loaded components.
        transformer = transformer_model.model
        text_encoder = qwen2_5vl_model.model
        tokenizer = qwen2_5vl_model.tokenizer
        processor = qwen2_5vl_model.vl_processor
        pipe = self.pipe
    else:
        transformer = None
        text_encoder = None
        tokenizer = None
        processor = None
        pipe = None
    if use_accelerator:
        self.download_accelerator()
    self.release_other_models()
    with self.hold_gc():
        self.release_model()
        if not vae:
            vae_model.load_models(False)
            vae = vae_model.model
        if not text_encoder or not tokenizer:
            qwen2_5vl_model.load_models(False)
            text_encoder = qwen2_5vl_model.model
            tokenizer = qwen2_5vl_model.tokenizer
            processor = qwen2_5vl_model.vl_processor
        # The accelerator LoRA expects different flow-matching shift settings.
        scheduler = FlowMatchEulerDiscreteScheduler(
            base_image_seq_len=256,
            base_shift=math.log(3) if use_accelerator else 0.5,
            invert_sigmas=False,
            max_image_seq_len=8192,
            max_shift=math.log(3) if use_accelerator else 0.9,
            num_train_timesteps=1000,
            shift=1.0,
            shift_terminal=None if use_accelerator else 0.02,
            stochastic_sampling=False,
            time_shift_type="exponential",
            use_beta_sigmas=False,
            use_dynamic_shifting=True,
            use_exponential_sigmas=False,
            use_karras_sigmas=False
        )
        if not transformer:
            transformer_model.load_models(False)
            transformer = transformer_model.model
        if for_inpainting:
            pipe = QwenImageEditInpaintPipeline(
                transformer=transformer,
                text_encoder=text_encoder,
                processor=processor,
                tokenizer=tokenizer,
                scheduler=scheduler,
                vae=vae
            )
        else:
            pipe = QwenImageEditPipeline(
                transformer=transformer,
                text_encoder=text_encoder,
                processor=processor,
                tokenizer=tokenizer,
                scheduler=scheduler,
                vae=vae
            )
        pipe.progress_bar = lambda total: ProgressBar(total=total)
        pipe.enable_sequential_cpu_offload()
        if loras or use_accelerator:
            loaded_loras = []
            balanced_loras = balance_loras(loras) if loras else []
            for index, lora in enumerate(balanced_loras):
                logger.info(f"Loading LoRA: {lora['name']} with strength {lora['strength']}")
                lora_name = lora["name"]
                lora_strength = lora["strength"]
                lora_file = os.path.join(self.qwen_image_lora_dir(), f"{lora_name}.safetensors")
                if not os.path.exists(lora_file):
                    raise FileNotFoundError(f"LoRA file not found: {lora_file}")
                lora_name = f"lora_{index}"  # adapters are registered by index, not by file name
                pipe.load_lora_weights(lora_file, adapter_name=lora_name)
                loaded_loras.append((lora_name, lora_strength))
            if use_accelerator:
                if accelerator_steps == 4:
                    lora_path = os.path.join(self.qwen_image_accelerator_dir(), ACCELERATOR_LORA_NAME_4STEPS)
                else:
                    lora_path = os.path.join(self.qwen_image_accelerator_dir(), ACCELERATOR_LORA_NAME_8STEPS)
                pipe.load_lora_weights(lora_path, adapter_name="lora_accelerator")
                loaded_loras.append(("lora_accelerator", 1.0))
            if loaded_loras:
                # Fuse the adapters into the base weights, then drop the adapter copies.
                lora_adapters = [name for name, _ in loaded_loras]
                lora_weights = [weight for _, weight in loaded_loras]
                pipe.fuse_lora(adapter_names=lora_adapters, adapter_weights=lora_weights)
                pipe.delete_adapters(adapter_names=lora_adapters)
        # self.enable_sequential_cpu_offload(transformer)
        # self.enable_sequential_cpu_offload(text_encoder)
        self.for_inpainting = for_inpainting
        self.transformer = transformer
        self.vae = vae
        self.text_encoder = text_encoder
        self.tokenizer = tokenizer
        self.processor = processor
        self.scheduler = scheduler
        self.loras = loras
        self.use_accelerator = use_accelerator
        self.accelerator_steps = accelerator_steps
        self.pipe = pipe

def create_pipeline(self):
    return self.pipe
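
A minimal usage sketch follows. Everything in it other than load_models() and create_pipeline() is an assumption: the QwenImageEditModels manager class, the LoRA name, and the file paths are illustrative, and the final call assumes the standard diffusers image-edit pipeline interface (image, prompt, num_inference_steps, .images).

# Hypothetical usage sketch -- QwenImageEditModels is an assumed manager class that
# owns load_models()/create_pipeline() above; it is not part of the gist.
from PIL import Image

manager = QwenImageEditModels()
manager.load_models(
    loras=[{"name": "my_style", "strength": 0.8}],  # expects <qwen_image_lora_dir()>/my_style.safetensors
    use_accelerator=True,   # fuses the 8-step accelerator LoRA and switches the scheduler shift
    accelerator_steps=8,
    for_inpainting=False,
)
pipe = manager.create_pipeline()

source = Image.open("input.png").convert("RGB")
edited = pipe(image=source, prompt="replace the sky with a sunset", num_inference_steps=8).images[0]
edited.save("edited.png")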