Skip to content

Commit 0e14fc6

Browse files
committed
Run Ruff
1 parent 027ac63 commit 0e14fc6

File tree

4 files changed

+5
-17
lines changed

4 files changed

+5
-17
lines changed

invokeai/app/invocations/flux_text_encoder.py

+2-8
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,9 @@
1-
from pathlib import Path
2-
31
import torch
42
from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
5-
from optimum.quanto import qfloat8
63
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
74

85
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
9-
from invokeai.app.invocations.model import CLIPField, T5EncoderField
10-
from invokeai.app.invocations.fields import InputField, FieldDescriptions, Input
11-
from invokeai.app.invocations.flux_text_to_image import FLUX_MODELS, QuantizedModelForTextEncoding
6+
from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
127
from invokeai.app.invocations.model import CLIPField, T5EncoderField
138
from invokeai.app.invocations.primitives import ConditioningOutput
149
from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -40,15 +35,14 @@ class FluxTextEncoderInvocation(BaseInvocation):
4035
# compatible with other ConditioningOutputs.
4136
@torch.no_grad()
4237
def invoke(self, context: InvocationContext) -> ConditioningOutput:
43-
4438
t5_embeddings, clip_embeddings = self._encode_prompt(context)
4539
conditioning_data = ConditioningFieldData(
4640
conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
4741
)
4842

4943
conditioning_name = context.conditioning.save(conditioning_data)
5044
return ConditioningOutput.build(conditioning_name)
51-
45+
5246
def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
5347
# TODO: Determine the T5 max sequence length based on the model.
5448
# if self.model == "flux-schnell":

invokeai/app/invocations/flux_text_to_image.py

+1-6
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,9 @@
1-
from pathlib import Path
21
from typing import Literal
3-
from pydantic import Field
42

53
import torch
64
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
75
from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
86
from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
9-
from invokeai.app.invocations.model import TransformerField, VAEField
10-
from optimum.quanto import qfloat8
117
from PIL import Image
128
from transformers.models.auto import AutoModelForTextEncoding
139

@@ -19,8 +15,8 @@
1915
InputField,
2016
WithBoard,
2117
WithMetadata,
22-
UIType,
2318
)
19+
from invokeai.app.invocations.model import TransformerField, VAEField
2420
from invokeai.app.invocations.primitives import ImageOutput
2521
from invokeai.app.services.shared.invocation_context import InvocationContext
2622
from invokeai.backend.quantization.fast_quantized_diffusion_model import FastQuantizedDiffusersModel
@@ -72,7 +68,6 @@ class FluxTextToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
7268

7369
@torch.no_grad()
7470
def invoke(self, context: InvocationContext) -> ImageOutput:
75-
7671
# Load the conditioning data.
7772
cond_data = context.conditioning.load(self.positive_text_conditioning.conditioning_name)
7873
assert len(cond_data.conditionings) == 1

invokeai/backend/quantization/fast_quantized_diffusion_model.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
from typing import Union
44

55
from diffusers.models.model_loading_utils import load_state_dict
6+
from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
67
from diffusers.utils import (
78
CONFIG_NAME,
89
SAFE_WEIGHTS_INDEX_NAME,
@@ -12,7 +13,6 @@
1213
)
1314
from optimum.quanto.models import QuantizedDiffusersModel
1415
from optimum.quanto.models.shared_dict import ShardedStateDict
15-
from diffusers.models.transformers.transformer_flux import FluxTransformer2DModel
1616

1717
from invokeai.backend.requantize import requantize
1818

invokeai/backend/quantization/fast_quantized_transformers_model.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,13 @@
11
import json
22
import os
3-
import torch
43
from typing import Union
54

65
from optimum.quanto.models import QuantizedTransformersModel
76
from optimum.quanto.models.shared_dict import ShardedStateDict
87
from transformers import AutoConfig
98
from transformers.modeling_utils import get_checkpoint_shard_files, load_state_dict
10-
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available
119
from transformers.models.auto import AutoModelForTextEncoding
10+
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available
1211

1312
from invokeai.backend.requantize import requantize
1413

0 commit comments

Comments (0)