Skip to content

Commit a5a1620

Browse files
authored
Merge pull request kohya-ss#2226 from kohya-ss/fix-hunyuan-image-batch-gen-error
fix: error on batch generation closes kohya-ss#2209
2 parents 5e366ac + a33cad7 commit a5a1620

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

hunyuan_image_minimal_inference.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1001,7 +1001,7 @@ def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) ->
10011001
all_precomputed_text_data.append(text_data)
10021002

10031003
# Models should be removed from device after prepare_text_inputs
1004-
del tokenizer_batch, text_encoder_batch, temp_shared_models_txt, conds_cache_batch
1004+
del tokenizer_vlm, text_encoder_vlm_batch, tokenizer_byt5, text_encoder_byt5_batch, temp_shared_models_txt, conds_cache_batch
10051005
gc.collect() # Force cleanup of Text Encoder from GPU memory
10061006
clean_memory_on_device(device)
10071007

@@ -1075,7 +1075,7 @@ def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) ->
10751075
# save_output expects latent to be [BCTHW] or [CTHW]. generate returns [BCTHW] (batch size 1).
10761076
# latent[0] is correct if generate returns it with batch dim.
10771077
# The latent from generate is (1, C, T, H, W)
1078-
save_output(current_args, vae_for_batch, latent[0], device) # Pass vae_for_batch
1078+
save_output(current_args, vae_for_batch, latent, device) # Pass vae_for_batch
10791079

10801080
vae_for_batch.to("cpu") # Move VAE back to CPU
10811081

0 commit comments

Comments (0)