import numpy as np
import torch
from transformers import Qwen2_5_VLProcessor
from transformers.feature_extraction_utils import BatchFeature
from transformers.models.qwen2_5_vl.processing_qwen2_5_vl import (
    Qwen2_5_VLProcessorKwargs,
)


class TimeLensProcessor(Qwen2_5_VLProcessor):
    r"""
    Constructs a TimeLens processor, which wraps a Qwen2.5-VL image processor, a Qwen2.5-VL video
    processor, and a Qwen2 tokenizer into a single processor. It inherits all the functionality of
    [`Qwen2_5_VLProcessor`]; see [`~Qwen2_5_VLProcessor.__call__`] and
    [`~Qwen2_5_VLProcessor.decode`] for more information.

    Args:
        image_processor ([`Qwen2VLImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`Qwen2TokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`Qwen2_5_VLVideoProcessor`], *optional*):
            The video processor is a required input.
        chat_template (`str`, *optional*):
            A Jinja template which will be used to convert lists of messages in a chat into a
            tokenizable string.
    """

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        video_processor=None,
        chat_template=None,
        **kwargs,
    ):
        super().__init__(
            image_processor, tokenizer, video_processor, chat_template, **kwargs
        )

        # Fall back to the standard Qwen2.5-VL vision delimiter tokens when the
        # tokenizer does not define its own; these wrap every per-frame token
        # block emitted in `__call__`.
        self.vision_start = getattr(tokenizer, "vision_start", "<|vision_start|>")
        self.vision_end = getattr(tokenizer, "vision_end", "<|vision_end|>")

    def __call__(
        self,
        images=None,
        text=None,
        videos=None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method
        forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's
        [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the
        vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
        Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy
                array or PyTorch tensor. Both channels-first and channels-last formats are
                supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or
                a list of strings (pretokenized string). If the sequences are provided as list of
                strings (pretokenized), you must set `is_split_into_words=True` (to lift the
                ambiguity with a batch of sequences).
            videos (`list[tuple]`):
                The video or batch of videos to be prepared. In this processor each video is
                expected as a `(frames, metadata)` pair, where `frames` is a 4D NumPy array or
                PyTorch tensor and `metadata` is a dict providing `"fps"` and `"frames_indices"`,
                used to compute per-frame timestamps.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not
              `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to
              by the model (when `return_attention_mask=True` or if *"attention_mask"* is in
              `self.model_input_names` and if `text` is not `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not
              `None`.
            - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when
              `videos` is not `None`.
            - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not
              `None`.
            - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not
              `None`.
            - **second_per_grid_ts** -- List of video seconds per time grid. Returned when `videos`
              is not `None`.
        """
        output_kwargs = self._merge_kwargs(
            Qwen2_5_VLProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        image_inputs = videos_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(
                images=images, **output_kwargs["images_kwargs"]
            )
            image_grid_thw = image_inputs["image_grid_thw"]

        if videos is not None:
            # Each entry in `videos` is a (frames, metadata) pair; split them.
            videos, metadata = [v[0] for v in videos], [v[1] for v in videos]

            # Qwen2.5-VL patchifies frames in temporal pairs, so copy each
            # even-indexed frame over the following odd slot; every temporal
            # pair then holds two identical frames.
            for cur_video_tensor in videos:
                cur_video_tensor[1::2] = cur_video_tensor[::2]

            # One timestamp (in seconds) per kept frame, derived from the
            # source frame indices and fps.
            frames_timestamps = [
                [
                    idx / cur_metadata["fps"]
                    for idx in cur_metadata["frames_indices"][::2]
                ]
                for cur_metadata in metadata
            ]
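            # Example: with fps=2.0 and frames_indices=[0, 2, 4, 6], the frames
            # [f0, f1, f2, f3] become [f0, f0, f2, f2], and the kept frames f0
            # and f2 are timestamped 0.0s and 2.0s respectively.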

            videos_inputs = self.video_processor(
                videos=videos, **output_kwargs["videos_kwargs"]
            )
            video_grid_thw = videos_inputs["video_grid_thw"]

        if not isinstance(text, list):
            text = [text]

        text = text.copy()
        if images is not None:
            merge_length = self.image_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.image_token in text[i]:
                    # Expand each image placeholder to one token per LLM
                    # position: (t * h * w) // merge_size**2.
                    num_image_tokens = image_grid_thw[index].prod() // merge_length
                    text[i] = text[i].replace(
                        self.image_token, "<|placeholder|>" * num_image_tokens, 1
                    )
                    index += 1
                text[i] = text[i].replace("<|placeholder|>", self.image_token)
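        # Example: an image with grid_thw = (1, 16, 16) and merge_size = 2
        # expands to 1 * 16 * 16 // 4 = 64 image tokens in the sequence.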

        if videos is not None:
            merge_length = self.video_processor.merge_size**2
            index = 0
            for i in range(len(text)):
                while self.video_token in text[i]:
                    # Rewrite each video placeholder as a run of timestamped
                    # per-frame vision blocks.
                    cur_video_tokens = ""
                    num_tokens_per_frame = (
                        video_grid_thw[index][1:].prod() // merge_length
                    )
                    per_frame_tokens = (
                        self.vision_start
                        + "<|placeholder|>" * num_tokens_per_frame
                        + self.vision_end
                    )
                    for cur_frames_timestamp in frames_timestamps[index]:
                        cur_video_tokens += (
                            f"{cur_frames_timestamp:.1f}s: " + per_frame_tokens
                        )

                    text[i] = text[i].replace(
                        self.vision_start + self.video_token + self.vision_end,
                        cur_video_tokens,
                        1,
                    )
                    index += 1
                # Frames are addressed with image tokens from here on.
                text[i] = text[i].replace("<|placeholder|>", self.image_token)
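            # With the default Qwen2.5-VL special tokens, the rewritten prompt
            # fragment looks like (two kept frames, four tokens each):
            #   "0.0s: <|vision_start|><|image_pad|>x4<|vision_end|>
            #    2.0s: <|vision_start|><|image_pad|>x4<|vision_end|>"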

            # Every frame now stands alone, so re-label the video features as
            # image features: each temporal grid step of every video becomes
            # one [1, h, w] image grid (e.g. video_grid_thw=[[4, 16, 16]]
            # yields four [1, 16, 16] image grids).
            image_grid_thw = torch.tensor(
                [
                    [1, grid_h, grid_w]
                    for grid_t, grid_h, grid_w in video_grid_thw
                    for _ in range(grid_t)
                ],
                dtype=torch.long,
            )

            image_inputs = {
                "pixel_values": videos_inputs["pixel_values_videos"],
                "image_grid_thw": image_grid_thw,
            }
            videos_inputs = {}

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop(
            "return_mm_token_type_ids", None
        )
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
        self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])

        if return_mm_token_type_ids:
            # Mark sequence positions holding image tokens with type id 1.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[array_ids == self.image_token_id] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(
            data={**text_inputs, **image_inputs, **videos_inputs},
            tensor_type=return_tensors,
        )
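

if __name__ == "__main__":
    # Minimal usage sketch. The checkpoint name below is hypothetical: substitute
    # a repo that actually ships this processor. The dummy video and metadata
    # only illustrate the expected (frames, metadata) input format.
    processor = TimeLensProcessor.from_pretrained("org/timelens-checkpoint")

    video = torch.zeros(8, 3, 224, 224, dtype=torch.uint8)  # 8 dummy RGB frames
    metadata = {"fps": 2.0, "frames_indices": list(range(0, 16, 2))}

    prompt = (
        processor.vision_start
        + processor.video_token
        + processor.vision_end
        + " Describe what happens in this video."
    )
    inputs = processor(text=prompt, videos=[(video, metadata)], return_tensors="pt")
    print({k: tuple(v.shape) if hasattr(v, "shape") else v for k, v in inputs.items()})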