@@ -1,14 +1,9 @@
-from pathlib import Path
-
 import torch
 from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
-from optimum.quanto import qfloat8
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
 
 from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
-from invokeai.app.invocations.model import CLIPField, T5EncoderField
-from invokeai.app.invocations.fields import InputField, FieldDescriptions, Input
-from invokeai.app.invocations.flux_text_to_image import FLUX_MODELS, QuantizedModelForTextEncoding
+from invokeai.app.invocations.fields import FieldDescriptions, Input, InputField
 from invokeai.app.invocations.model import CLIPField, T5EncoderField
 from invokeai.app.invocations.primitives import ConditioningOutput
 from invokeai.app.services.shared.invocation_context import InvocationContext
@@ -40,15 +35,14 @@ class FluxTextEncoderInvocation(BaseInvocation):
     # compatible with other ConditioningOutputs.
     @torch.no_grad()
     def invoke(self, context: InvocationContext) -> ConditioningOutput:
-
         t5_embeddings, clip_embeddings = self._encode_prompt(context)
         conditioning_data = ConditioningFieldData(
             conditionings=[FLUXConditioningInfo(clip_embeds=clip_embeddings, t5_embeds=t5_embeddings)]
         )
 
         conditioning_name = context.conditioning.save(conditioning_data)
         return ConditioningOutput.build(conditioning_name)
-
+
     def _encode_prompt(self, context: InvocationContext) -> tuple[torch.Tensor, torch.Tensor]:
         # TODO: Determine the T5 max sequence length based on the model.
         # if self.model == "flux-schnell":
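
For context, the two tensors returned by _encode_prompt are the usual FLUX conditioning pair: a pooled CLIP embedding (clip_embeds) and a per-token T5 sequence embedding (t5_embeds). Below is a minimal, standalone sketch of that encoding step using only the transformers classes imported in this diff. It bypasses InvokeAI's model manager (the invocation itself resolves the encoders through CLIPField / T5EncoderField), and the repo id, subfolder names, and the 256-token T5 max length are assumptions for illustration, not values taken from this PR.

import torch
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast


def encode_prompt_sketch(
    prompt: str,
    repo_id: str = "black-forest-labs/FLUX.1-schnell",  # assumed repo layout, not the PR's loader
) -> tuple[torch.Tensor, torch.Tensor]:
    # CLIP: pooled embedding, used as the clip_embeds half of the conditioning.
    clip_tokenizer = CLIPTokenizer.from_pretrained(repo_id, subfolder="tokenizer")
    clip_encoder = CLIPTextModel.from_pretrained(repo_id, subfolder="text_encoder")
    clip_inputs = clip_tokenizer(
        prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt"
    )
    with torch.no_grad():
        clip_embeds = clip_encoder(**clip_inputs).pooler_output

    # T5: per-token sequence embeddings, used as the t5_embeds half.
    # The TODO in the diff notes the max sequence length should depend on the
    # model variant (e.g. schnell vs. dev); 256 here is an assumed value.
    t5_tokenizer = T5TokenizerFast.from_pretrained(repo_id, subfolder="tokenizer_2")
    t5_encoder = T5EncoderModel.from_pretrained(repo_id, subfolder="text_encoder_2")
    t5_inputs = t5_tokenizer(
        prompt, padding="max_length", max_length=256, truncation=True, return_tensors="pt"
    )
    with torch.no_grad():
        t5_embeds = t5_encoder(t5_inputs.input_ids).last_hidden_state

    return t5_embeds, clip_embeds

In the invocation itself, this (t5_embeddings, clip_embeddings) pair is wrapped in FLUXConditioningInfo and persisted with context.conditioning.save(), exactly as the invoke() body in the diff above shows.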