Skip to content

Added fast processor for llava-next-video model #37297

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions src/transformers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1356,6 +1356,7 @@
_import_structure["models.got_ocr2"].append("GotOcr2ImageProcessorFast")
_import_structure["models.llava"].append("LlavaImageProcessorFast")
_import_structure["models.llava_next"].append("LlavaNextImageProcessorFast")
_import_structure["models.llava_next_video"].append("LlavaNextVideoImageProcessorFast")
_import_structure["models.llava_onevision"].append("LlavaOnevisionImageProcessorFast")
_import_structure["models.phi4_multimodal"].append("Phi4MultimodalImageProcessorFast")
_import_structure["models.pixtral"].append("PixtralImageProcessorFast")
Expand Down Expand Up @@ -6648,6 +6649,7 @@
from .models.got_ocr2 import GotOcr2ImageProcessorFast
from .models.llava import LlavaImageProcessorFast
from .models.llava_next import LlavaNextImageProcessorFast
from .models.llava_next_video import LlavaNextVideoImageProcessorFast
from .models.llava_onevision import LlavaOnevisionImageProcessorFast
from .models.phi4_multimodal import Phi4MultimodalImageProcessorFast
from .models.pixtral import PixtralImageProcessorFast
Expand Down
2 changes: 2 additions & 0 deletions src/transformers/feature_extraction_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,8 @@ def __getitem__(self, item: str) -> Union[Any]:

def __getattr__(self, item: str):
    """Expose stored feature tensors as attributes (e.g. ``batch.pixel_values``).

    As a backward-compatibility convenience for video processors, a request for
    ``pixel_values`` falls back to the ``pixel_values_videos`` entry — but only
    when ``pixel_values`` itself is absent, so an existing image tensor is
    never shadowed by the video tensor.

    Raises:
        AttributeError: if ``item`` is not a stored feature key.
    """
    try:
        # Alias only on a true miss: if "pixel_values" is stored, return it
        # untouched even when "pixel_values_videos" is also present.
        if item == "pixel_values" and item not in self.data and "pixel_values_videos" in self.data:
            return self.data["pixel_values_videos"]
        return self.data[item]
    except KeyError:
        # Re-raise as AttributeError (the __getattr__ contract) and include
        # the missing name so the failure is actionable.
        raise AttributeError(item)
Expand Down
2 changes: 1 addition & 1 deletion src/transformers/models/auto/image_processing_auto.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@
("levit", ("LevitImageProcessor",)),
("llava", ("LlavaImageProcessor", "LlavaImageProcessorFast")),
("llava_next", ("LlavaNextImageProcessor", "LlavaNextImageProcessorFast")),
("llava_next_video", ("LlavaNextVideoImageProcessor",)),
("llava_next_video", ("LlavaNextVideoImageProcessor", "LlavaNextVideoImageProcessorFast")),
("llava_onevision", ("LlavaOnevisionImageProcessor", "LlavaOnevisionImageProcessorFast")),
("mask2former", ("Mask2FormerImageProcessor",)),
("maskformer", ("MaskFormerImageProcessor",)),
Expand Down
1 change: 1 addition & 0 deletions src/transformers/models/llava_next_video/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
if TYPE_CHECKING:
from .configuration_llava_next_video import *
from .image_processing_llava_next_video import *
from .image_processing_llava_next_video_fast import *
from .modeling_llava_next_video import *
from .processing_llava_next_video import *
else:
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for LLaVa-NeXT-Video."""

from ...image_processing_utils_fast import BASE_IMAGE_PROCESSOR_FAST_DOCSTRING, BaseImageProcessorFast
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import add_start_docstrings


@add_start_docstrings(
    "Constructs a fast LlavaNextVideo image processor.",
    BASE_IMAGE_PROCESSOR_FAST_DOCSTRING,
)
class LlavaNextVideoImageProcessorFast(BaseImageProcessorFast):
    # Torchvision-backed fast variant for LLaVa-NeXT-Video. The class only
    # declares default values: the per-frame transforms (resize, center crop,
    # rescale, normalize, RGB conversion) are all handled generically by
    # `BaseImageProcessorFast`, so no methods are overridden here.
    #
    # Defaults are intended to mirror the slow `LlavaNextVideoImageProcessor`
    # (CLIP-style 224 shortest-edge resize + 224x224 center crop with the
    # OpenAI CLIP normalization stats) — verify against the slow processor
    # before relying on them.
    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"shortest_edge": 224}
    default_to_square = False
    crop_size = {"height": 224, "width": 224}
    do_resize = True
    do_center_crop = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    # NOTE(review): nothing in this class consumes `image_grid_pinpoints`
    # (no anyres patching is done for video frames) — confirm whether the
    # base class uses it; if not, this attribute can likely be dropped.
    image_grid_pinpoints = [[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]


__all__ = ["LlavaNextVideoImageProcessorFast"]
7 changes: 7 additions & 0 deletions src/transformers/utils/dummy_torchvision_objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,13 @@ def __init__(self, *args, **kwargs):
requires_backends(self, ["torchvision"])


class LlavaNextVideoImageProcessorFast(metaclass=DummyObject):
    # Import-time placeholder installed when torchvision is unavailable:
    # any attempt to instantiate it is routed to `requires_backends`, which
    # reports the missing "torchvision" backend to the user.
    _backends = ["torchvision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torchvision"])

class LlavaOnevisionImageProcessorFast(metaclass=DummyObject):
_backends = ["torchvision"]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@

from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from transformers.utils import is_torch_available, is_torchvision_available, is_vision_available

from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs

Expand All @@ -32,6 +32,9 @@

from transformers import LlavaNextVideoImageProcessor

if is_torchvision_available():
from transformers import LlavaNextVideoImageProcessorFast


class LlavaNextVideoProcessingTester:
def __init__(
Expand Down Expand Up @@ -128,6 +131,7 @@ def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=F
@require_vision
class LlavaNextVideoProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = LlavaNextVideoImageProcessor if is_vision_available() else None
fast_image_processing_class = LlavaNextVideoImageProcessorFast if is_torchvision_available() else None

def setUp(self):
super().setUp()
Expand Down